/*
 * 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
 */
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
#include "3w-9xxx.h"

#define TW_DRIVER_VERSION "2.26.02.014"
static DEFINE_MUTEX(twa_chrdev_mutex);
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
extern struct timezone sys_tz;

MODULE_AUTHOR ("LSI");
MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);

static int use_msi = 0;
module_param(use_msi, int, S_IRUGO);
MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");

static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
static char *twa_aen_severity_lookup(unsigned char severity_code);
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static int twa_chrdev_open(struct inode *inode, struct file *file);
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id);
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result);
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);

static ssize_t twa_show_stats(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
	unsigned long flags = 0;
	ssize_t len;

	spin_lock_irqsave(tw_dev->host->host_lock, flags);
	len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
		       "Current commands posted: %4d\n"
		       "Max commands posted: %4d\n"
		       "Current pending commands: %4d\n"
		       "Max pending commands: %4d\n"
		       "Last sgl length: %4d\n"
		       "Max sgl length: %4d\n"
		       "Last sector count: %4d\n"
		       "Max sector count: %4d\n"
		       "SCSI Host Resets: %4d\n"
		       "AEN's: %4d\n",
		       TW_DRIVER_VERSION,
		       tw_dev->posted_request_count,
		       tw_dev->max_posted_request_count,
		       tw_dev->pending_request_count,
		       tw_dev->max_pending_request_count,
		       tw_dev->sgl_entries,
		       tw_dev->max_sgl_entries,
		       tw_dev->sector_count,
		       tw_dev->max_sector_count,
		       tw_dev->num_resets,
		       tw_dev->aen_count);
	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
	return len;
}

static struct device_attribute twa_host_stats_attr = {
	.attr = {
		.name = "stats",
		.mode = S_IRUGO,
	},
	.show = twa_show_stats
};

static struct device_attribute *twa_host_attrs[] = {
	&twa_host_stats_attr,
	NULL,
};

static const struct file_operations twa_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = twa_chrdev_ioctl,
	.open = twa_chrdev_open,
	.release = NULL,
	.llseek = noop_llseek,
};

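/*
 * Returns true when the command's data buffer is DMA-mapped directly;
 * small single-segment buffers are bounced through the generic buffer instead.
 */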
static bool twa_command_mapped(struct scsi_cmnd *cmd)
{
	return scsi_sg_count(cmd) != 1 ||
		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
}

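/* This function will complete an aen request from the isr */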
static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int retval = 1;

	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
	tw_dev->posted_request_count--;
	aen = le16_to_cpu(header->status_block.error);
	full_command_packet = tw_dev->command_packet_virt[request_id];
	command_packet = &full_command_packet->command.oldcommand;

	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}

	switch (aen) {
	case TW_AEN_QUEUE_EMPTY:
		break;
	case TW_AEN_SYNC_TIME_WITH_HOST:
		twa_aen_sync_time(tw_dev, request_id);
		retval = 0;
		goto out;
	default:
		twa_aen_queue_event(tw_dev, header);

		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}
	retval = 0;
out2:
	tw_dev->state[request_id] = TW_S_COMPLETED;
	twa_free_request_id(tw_dev, request_id);
	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
out:
	return retval;
}

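/* This function will drain the aen queue, typically after a reset */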
static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
{
	int request_id = 0;
	char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	int finished = 0, count = 0;
	TW_Command_Full *full_command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int first_reset = 0, queue = 0, retval = 1;

	if (no_check_reset)
		first_reset = 0;
	else
		first_reset = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE;
	cdb[4] = TW_ALLOCATION_LENGTH;

	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = TW_SECTOR_SIZE;
	sglist[0].address = tw_dev->generic_buffer_phys[request_id];

	if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
		goto out;
	}

	tw_dev->srb[request_id] = NULL;

	do {
		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
			goto out;
		}

		if (twa_poll_response(tw_dev, request_id, 30)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
			tw_dev->posted_request_count--;
			goto out;
		}

		tw_dev->posted_request_count--;
		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
		aen = le16_to_cpu(header->status_block.error);
		queue = 0;
		count++;

		switch (aen) {
		case TW_AEN_QUEUE_EMPTY:
			if (first_reset != 1)
				goto out;
			else
				finished = 1;
			break;
		case TW_AEN_SOFT_RESET:
			if (first_reset == 0)
				first_reset = 1;
			else
				queue = 1;
			break;
		case TW_AEN_SYNC_TIME_WITH_HOST:
			break;
		default:
			queue = 1;
		}

		if (queue)
			twa_aen_queue_event(tw_dev, header);
	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));

	if (count == TW_MAX_AEN_DRAIN)
		goto out;

	retval = 0;
out:
	tw_dev->state[request_id] = TW_S_INITIAL;
	return retval;
}

static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
	u32 local_time;
	struct timeval time;
	TW_Event *event;
	unsigned short aen;
	char host[16];
	char *error_str;

	tw_dev->aen_count++;

	event = tw_dev->event_queue[tw_dev->error_index];

	host[0] = '\0';
	if (tw_dev->host) {
		sprintf(host, " scsi%d:", tw_dev->host->host_no);
		if (event->retrieved == TW_AEN_NOT_RETRIEVED)
			tw_dev->aen_clobber = 1;
	}

	aen = le16_to_cpu(header->status_block.error);
	memset(event, 0, sizeof(TW_Event));

	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
	do_gettimeofday(&time);
	local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
	event->time_stamp_sec = local_time;
	event->aen_code = aen;
	event->retrieved = TW_AEN_NOT_RETRIEVED;
	event->sequence_id = tw_dev->error_sequence_id;
	tw_dev->error_sequence_id++;

	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);

	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
	event->parameter_len = strlen(header->err_specific_desc);
	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
	if (event->severity != TW_AEN_SEVERITY_DEBUG)
		printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
		       host,
		       twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
		       error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
		       header->err_specific_desc);
	else
		tw_dev->aen_count--;

	if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
		tw_dev->event_queue_wrapped = 1;
	tw_dev->error_index = (tw_dev->error_index + 1) % TW_Q_LENGTH;
}

static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
{
	char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	TW_Command_Full *full_command_packet;
	int retval = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE;
	cdb[4] = TW_ALLOCATION_LENGTH;

	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = TW_SECTOR_SIZE;
	sglist[0].address = tw_dev->generic_buffer_phys[request_id];

	tw_dev->srb[request_id] = NULL;

	if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
		goto out;
	}
	retval = 0;
out:
	return retval;
}

static char *twa_aen_severity_lookup(unsigned char severity_code)
{
	char *retval = NULL;

	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
		goto out;

	retval = twa_aen_severity_table[severity_code];
out:
	return retval;
}

static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
	u32 schedulertime;
	struct timeval utc;
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	u32 local_time;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;
	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
	command_packet->request_id = request_id;
	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);

	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000);
	param->parameter_id = cpu_to_le16(0x3);
	param->parameter_size_bytes = cpu_to_le16(4);

	do_gettimeofday(&utc);
	local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
	schedulertime = local_time - (3 * 86400);
	schedulertime = cpu_to_le32(schedulertime % 604800);

	memcpy(param->data, &schedulertime, sizeof(u32));

	tw_dev->srb[request_id] = NULL;

	twa_post_command_packet(tw_dev, request_id, 1);
}

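/* This function will allocate memory and check if it is correctly aligned */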
static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
{
	int i;
	dma_addr_t dma_handle;
	unsigned long *cpu_addr;
	int retval = 1;

	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
	if (!cpu_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
		goto out;
	}

	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
		pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
		goto out;
	}

	memset(cpu_addr, 0, size*TW_Q_LENGTH);

	for (i = 0; i < TW_Q_LENGTH; i++) {
		switch(which) {
		case 0:
			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
			break;
		case 1:
			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
			break;
		}
	}
	retval = 0;
out:
	return retval;
}

static int twa_check_bits(u32 status_reg_value)
{
	int retval = 1;

	if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
		goto out;
	if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
		goto out;

	retval = 0;
out:
	return retval;
}

static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
{
	int retval = 1;
	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
	u32 init_connect_result = 0;

	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
			       &fw_on_ctlr_build, &init_connect_result)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
		goto out;
	}

	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;

	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
				       TW_EXTENDED_INIT_CONNECT,
				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
				       &init_connect_result)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
			goto out;
		}
		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
			} else {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
			}
			goto out;
		}
		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
	}

	strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
		sizeof(tw_dev->tw_compat_info.driver_version));
	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;

	retval = 0;
out:
	return retval;
}

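/* This function handles ioctls from userspace to the driver's character device */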
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	long timeout;
	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
	dma_addr_t dma_handle;
	int request_id = 0;
	unsigned int sequence_id = 0;
	unsigned char event_index, start_index;
	TW_Ioctl_Driver_Command driver_command;
	TW_Ioctl_Buf_Apache *tw_ioctl;
	TW_Lock *tw_lock;
	TW_Command_Full *full_command_packet;
	TW_Compatibility_Info *tw_compat_info;
	TW_Event *event;
	struct timeval current_time;
	u32 current_time_ms;
	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
	int retval = TW_IOCTL_ERROR_OS_EFAULT;
	void __user *argp = (void __user *)arg;

	mutex_lock(&twa_chrdev_mutex);

	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
		retval = TW_IOCTL_ERROR_OS_EINTR;
		goto out;
	}

	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
		goto out2;

	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
		retval = TW_IOCTL_ERROR_OS_EINVAL;
		goto out2;
	}

	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;

	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
	if (!cpu_addr) {
		retval = TW_IOCTL_ERROR_OS_ENOMEM;
		goto out2;
	}

	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;

	if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
		goto out3;

	switch (cmd) {
	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		twa_get_request_id(tw_dev, &request_id);

		tw_dev->srb[request_id] = NULL;

		tw_dev->chrdev_request_id = request_id;

		full_command_packet = &tw_ioctl->firmware_command;

		twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);

		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));

		twa_post_command_packet(tw_dev, request_id, 1);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;

		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);

		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
			printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
			       tw_dev->host->host_no, TW_DRIVER, 0x37,
			       cmd);
			retval = TW_IOCTL_ERROR_OS_EIO;
			twa_reset_device_extension(tw_dev);
			goto out3;
		}

		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));

		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		tw_dev->posted_request_count--;
		tw_dev->state[request_id] = TW_S_COMPLETED;
		twa_free_request_id(tw_dev, request_id);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
		break;
	case TW_IOCTL_GET_COMPATIBILITY_INFO:
		tw_ioctl->driver_command.status = 0;

		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
		break;
	case TW_IOCTL_GET_LAST_EVENT:
		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			} else
				tw_ioctl->driver_command.status = 0;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			tw_ioctl->driver_command.status = 0;
		}
		event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_FIRST_EVENT:
		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			} else
				tw_ioctl->driver_command.status = 0;
			event_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			tw_ioctl->driver_command.status = 0;
			event_index = 0;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_NEXT_EVENT:
		event = (TW_Event *)tw_ioctl->data_buffer;
		sequence_id = event->sequence_id;
		tw_ioctl->driver_command.status = 0;

		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			}
			start_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			start_index = 0;
		}
		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;

		if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
				tw_dev->aen_clobber = 1;
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
			break;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_PREVIOUS_EVENT:
		event = (TW_Event *)tw_ioctl->data_buffer;
		sequence_id = event->sequence_id;
		tw_ioctl->driver_command.status = 0;

		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			}
			start_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			start_index = 0;
		}
		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;

		if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
				tw_dev->aen_clobber = 1;
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
			break;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_LOCK:
		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
		do_gettimeofday(&current_time);
		current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);

		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
			tw_dev->ioctl_sem_lock = 1;
			tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
			tw_ioctl->driver_command.status = 0;
			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
		} else {
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
			tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
		}
		break;
	case TW_IOCTL_RELEASE_LOCK:
		if (tw_dev->ioctl_sem_lock == 1) {
			tw_dev->ioctl_sem_lock = 0;
			tw_ioctl->driver_command.status = 0;
		} else {
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
		}
		break;
	default:
		retval = TW_IOCTL_ERROR_OS_ENOTTY;
		goto out3;
	}

	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
		retval = 0;
out3:
	dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
out2:
	mutex_unlock(&tw_dev->ioctl_lock);
out:
	mutex_unlock(&twa_chrdev_mutex);
	return retval;
}


static int twa_chrdev_open(struct inode *inode, struct file *file)
{
	unsigned int minor_number;
	int retval = TW_IOCTL_ERROR_OS_ENODEV;

	minor_number = iminor(inode);
	if (minor_number >= twa_device_extension_count)
		goto out;
	retval = 0;
out:
	return retval;
}

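/* This function reports and clears controller errors found in the status register */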
static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
{
	int retval = 1;

	if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
		writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
	}

	if (status_reg_value & TW_STATUS_PCI_ABORT) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
		writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
		pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
	}

	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
	}

	if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
		if (tw_dev->reset_print == 0) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
			tw_dev->reset_print = 1;
		}
		goto out;
	}
	retval = 0;
out:
	return retval;
}

static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
{
	u32 status_reg_value, response_que_value;
	int count = 0, retval = 1;

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
		response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
		count++;
	}
	if (count == TW_MAX_RESPONSE_DRAIN)
		goto out;

	retval = 0;
out:
	return retval;
}

static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
{
	u32 response_que_value = 0;
	unsigned long before;
	int retval = 1;

	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
		before = jiffies;
		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
			msleep(1);
			if (time_after(jiffies, before + HZ * 30))
				goto out;
		}

		msleep(500);
		retval = 0;
	} else
		retval = 0;
out:
	return retval;
}


static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
{
	TW_Command_Full *full_command_packet;
	unsigned short error;
	int retval = 1;
	char *error_str;

	full_command_packet = tw_dev->command_packet_virt[request_id];

	error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);

	error = le16_to_cpu(full_command_packet->header.status_block.error);
	if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
		if (print_host)
			printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
			       tw_dev->host->host_no,
			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
			       full_command_packet->header.status_block.error,
			       error_str[0] == '\0' ?
			       twa_string_lookup(twa_error_table,
						 full_command_packet->header.status_block.error) : error_str,
			       full_command_packet->header.err_specific_desc);
		else
			printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
			       full_command_packet->header.status_block.error,
			       error_str[0] == '\0' ?
			       twa_string_lookup(twa_error_table,
						 full_command_packet->header.status_block.error) : error_str,
			       full_command_packet->header.err_specific_desc);
	}

	if (copy_sense) {
		memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
		tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
		retval = TW_ISR_DONT_RESULT;
		goto out;
	}
	retval = 0;
out:
	return retval;
}

static void twa_free_device_extension(TW_Device_Extension *tw_dev)
{
	if (tw_dev->command_packet_virt[0])
		pci_free_consistent(tw_dev->tw_pci_dev,
				    sizeof(TW_Command_Full)*TW_Q_LENGTH,
				    tw_dev->command_packet_virt[0],
				    tw_dev->command_packet_phys[0]);

	if (tw_dev->generic_buffer_virt[0])
		pci_free_consistent(tw_dev->tw_pci_dev,
				    TW_SECTOR_SIZE*TW_Q_LENGTH,
				    tw_dev->generic_buffer_virt[0],
				    tw_dev->generic_buffer_phys[0]);

	kfree(tw_dev->event_queue[0]);
}

static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
{
	tw_dev->free_queue[tw_dev->free_tail] = request_id;
	tw_dev->state[request_id] = TW_S_FINISHED;
	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
}


static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	void *retval = NULL;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;

	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->request_id = request_id;
	command_packet->byte6_offset.block_count = cpu_to_le16(1);

	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(table_id | 0x8000);
	param->parameter_id = cpu_to_le16(parameter_id);
	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);

	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);

	twa_post_command_packet(tw_dev, request_id, 1);

	if (twa_poll_response(tw_dev, request_id, 30))
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
	else
		retval = (void *)&(param->data[0]);

	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
}

static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
{
	*request_id = tw_dev->free_queue[tw_dev->free_head];
	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
	tw_dev->state[*request_id] = TW_S_STARTED;
}


static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result)
{
	TW_Command_Full *full_command_packet;
	TW_Initconnect *tw_initconnect;
	int request_id = 0, retval = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	full_command_packet->header.header_desc.size_header = 128;

	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
	tw_initconnect->request_id = request_id;
	tw_initconnect->message_credits = cpu_to_le16(message_credits);
	tw_initconnect->features = set_features;

	tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;

	tw_initconnect->features = cpu_to_le32(tw_initconnect->features);

	if (set_features & TW_EXTENDED_INIT_CONNECT) {
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
	} else
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;

	twa_post_command_packet(tw_dev, request_id, 1);

	if (twa_poll_response(tw_dev, request_id, 30)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
	} else {
		if (set_features & TW_EXTENDED_INIT_CONNECT) {
			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
			*init_connect_result = le32_to_cpu(tw_initconnect->result);
		}
		retval = 0;
	}

	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
}


static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
{
	int i, retval = 1;

	if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
		goto out;
	}

	if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
		goto out;
	}

	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
	if (!tw_dev->event_queue[0]) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
		goto out;
	}

	for (i = 0; i < TW_Q_LENGTH; i++) {
		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
		tw_dev->free_queue[i] = i;
		tw_dev->state[i] = TW_S_INITIAL;
	}

	tw_dev->pending_head = TW_Q_START;
	tw_dev->pending_tail = TW_Q_START;
	tw_dev->free_head = TW_Q_START;
	tw_dev->free_tail = TW_Q_START;
	tw_dev->error_sequence_id = 1;
	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;

	mutex_init(&tw_dev->ioctl_lock);
	init_waitqueue_head(&tw_dev->ioctl_wqueue);

	retval = 0;
out:
	return retval;
}

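/* This function is the interrupt service routine */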
static irqreturn_t twa_interrupt(int irq, void *dev_instance)
{
	int request_id, error = 0;
	u32 status_reg_value;
	TW_Response_Queue response_que;
	TW_Command_Full *full_command_packet;
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
	int handled = 0;

	spin_lock(tw_dev->host->host_lock);

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
		goto twa_interrupt_bail;

	handled = 1;

	if (test_bit(TW_IN_RESET, &tw_dev->flags))
		goto twa_interrupt_bail;

	if (twa_check_bits(status_reg_value)) {
		if (twa_decode_bits(tw_dev, status_reg_value)) {
			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
			goto twa_interrupt_bail;
		}
	}

	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
		TW_CLEAR_HOST_INTERRUPT(tw_dev);

	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
			twa_get_request_id(tw_dev, &request_id);

			error = twa_aen_read_queue(tw_dev, request_id);
			if (error) {
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
			}
		}
	}

	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
		TW_MASK_COMMAND_INTERRUPT(tw_dev);

		while (tw_dev->pending_request_count > 0) {
			request_id = tw_dev->pending_queue[tw_dev->pending_head];
			if (tw_dev->state[request_id] != TW_S_PENDING) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
				goto twa_interrupt_bail;
			}
			if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
				tw_dev->pending_request_count--;
			} else {
				break;
			}
		}
	}

	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {

		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {

			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
			request_id = TW_RESID_OUT(response_que.response_id);
			full_command_packet = tw_dev->command_packet_virt[request_id];
			error = 0;

			if (full_command_packet->command.newcommand.status != 0) {
				if (tw_dev->srb[request_id] != NULL) {
					error = twa_fill_sense(tw_dev, request_id, 1, 1);
				} else {
					if (request_id != tw_dev->chrdev_request_id) {
						error = twa_fill_sense(tw_dev, request_id, 0, 1);
					}
				}
			}

			if (tw_dev->state[request_id] != TW_S_POSTED) {
				if (tw_dev->srb[request_id] != NULL) {
					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}

			if (tw_dev->srb[request_id] == NULL) {
				if (request_id != tw_dev->chrdev_request_id) {
					if (twa_aen_complete(tw_dev, request_id))
						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
				} else {
					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
					wake_up(&tw_dev->ioctl_wqueue);
				}
			} else {
				struct scsi_cmnd *cmd;

				cmd = tw_dev->srb[request_id];

				twa_scsiop_execute_scsi_complete(tw_dev, request_id);

				if (error == 0) {
					cmd->result = (DID_OK << 16);
				}

				if (error == 1) {
					cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
				}

				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
					if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
						scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
				}

				if (twa_command_mapped(cmd))
					scsi_dma_unmap(cmd);
				cmd->scsi_done(cmd);
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				tw_dev->posted_request_count--;
			}

			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
			if (twa_check_bits(status_reg_value)) {
				if (twa_decode_bits(tw_dev, status_reg_value)) {
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}
		}
	}

twa_interrupt_bail:
	spin_unlock(tw_dev->host->host_lock);
	return IRQ_RETVAL(handled);
}


static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
{
	TW_Command *oldcommand;
	TW_Command_Apache *newcommand;
	TW_SG_Entry *sgl;
	unsigned int pae = 0;

	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
		pae = 1;

	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
		newcommand = &full_command_packet->command.newcommand;
		newcommand->request_id__lunl =
			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
		if (length) {
			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
			newcommand->sg_list[0].length = cpu_to_le32(length);
		}
		newcommand->sgl_entries__lunh =
			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
	} else {
		oldcommand = &full_command_packet->command.oldcommand;
		oldcommand->request_id = request_id;

		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
			if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
				sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
			else
				sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
			sgl->length = cpu_to_le32(length);

			oldcommand->size += pae;
		}
	}
}

static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
{
	int retval = 1, found = 0, response_request_id;
	TW_Response_Queue response_queue;
	TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];

	if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
		response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
		response_request_id = TW_RESID_OUT(response_queue.response_id);
		if (request_id != response_request_id) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
			goto out;
		}
		if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
			if (full_command_packet->command.newcommand.status != 0) {
				twa_fill_sense(tw_dev, request_id, 0, 0);
				goto out;
			}
			found = 1;
		} else {
			if (full_command_packet->command.oldcommand.status != 0) {
				twa_fill_sense(tw_dev, request_id, 0, 0);
				goto out;
			}
			found = 1;
		}
	}

	if (found)
		retval = 0;
out:
	return retval;
}


static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
{
	u32 status_reg_value;
	unsigned long before;
	int retval = 1;

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
	before = jiffies;

	if (twa_check_bits(status_reg_value))
		twa_decode_bits(tw_dev, status_reg_value);

	while ((status_reg_value & flag) != flag) {
		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

		if (twa_check_bits(status_reg_value))
			twa_decode_bits(tw_dev, status_reg_value);

		if (time_after(jiffies, before + HZ * seconds))
			goto out;

		msleep(50);
	}
	retval = 0;
out:
	return retval;
}

static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
{
	u32 status_reg_value;
	unsigned long before;
	int retval = 1;

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
	before = jiffies;

	if (twa_check_bits(status_reg_value))
		twa_decode_bits(tw_dev, status_reg_value);

	while ((status_reg_value & flag) != 0) {
		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
		if (twa_check_bits(status_reg_value))
			twa_decode_bits(tw_dev, status_reg_value);

		if (time_after(jiffies, before + HZ * seconds))
			goto out;

		msleep(50);
	}
	retval = 0;
out:
	return retval;
}

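/* This function will post a command packet to the board, or queue it as pending when the controller is busy */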
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
{
	u32 status_reg_value;
	dma_addr_t command_que_value;
	int retval = 1;

	command_que_value = tw_dev->command_packet_phys[request_id];

	if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
	    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
		command_que_value += TW_COMMAND_OFFSET;
		writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
	}

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	if (twa_check_bits(status_reg_value))
		twa_decode_bits(tw_dev, status_reg_value);

	if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {

		if (!internal) {
			retval = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}

		if (tw_dev->state[request_id] != TW_S_PENDING) {
			tw_dev->state[request_id] = TW_S_PENDING;
			tw_dev->pending_request_count++;
			if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
				tw_dev->max_pending_request_count = tw_dev->pending_request_count;
			}
			tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
			tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
		}
		TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
		goto out;
	} else {
		if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
		    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
			writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
		} else {
			if (sizeof(dma_addr_t) > 4) {
				command_que_value += TW_COMMAND_OFFSET;
				writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
				writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
			} else {
				writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
			}
		}
		tw_dev->state[request_id] = TW_S_POSTED;
		tw_dev->posted_request_count++;
		if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
			tw_dev->max_posted_request_count = tw_dev->posted_request_count;
		}
	}
	retval = 0;
out:
	return retval;
}

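/* This function will reset a device extension */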
static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
{
	int i = 0;
	int retval = 1;
	unsigned long flags = 0;

	set_bit(TW_IN_RESET, &tw_dev->flags);
	TW_DISABLE_INTERRUPTS(tw_dev);
	TW_MASK_COMMAND_INTERRUPT(tw_dev);
	spin_lock_irqsave(tw_dev->host->host_lock, flags);

	for (i = 0; i < TW_Q_LENGTH; i++) {
		if ((tw_dev->state[i] != TW_S_FINISHED) &&
		    (tw_dev->state[i] != TW_S_INITIAL) &&
		    (tw_dev->state[i] != TW_S_COMPLETED)) {
			if (tw_dev->srb[i]) {
				struct scsi_cmnd *cmd = tw_dev->srb[i];

				cmd->result = (DID_RESET << 16);
				if (twa_command_mapped(cmd))
					scsi_dma_unmap(cmd);
				cmd->scsi_done(cmd);
			}
		}
	}

	for (i = 0; i < TW_Q_LENGTH; i++) {
		tw_dev->free_queue[i] = i;
		tw_dev->state[i] = TW_S_INITIAL;
	}
	tw_dev->free_head = TW_Q_START;
	tw_dev->free_tail = TW_Q_START;
	tw_dev->posted_request_count = 0;
	tw_dev->pending_request_count = 0;
	tw_dev->pending_head = TW_Q_START;
	tw_dev->pending_tail = TW_Q_START;
	tw_dev->reset_print = 0;

	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

	if (twa_reset_sequence(tw_dev, 1))
		goto out;

	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
	clear_bit(TW_IN_RESET, &tw_dev->flags);
	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;

	retval = 0;
out:
	return retval;
}

static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
{
	int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;

	while (tries < TW_MAX_RESET_TRIES) {
		if (do_soft_reset) {
			TW_SOFT_RESET(tw_dev);

			if (twa_empty_response_queue_large(tw_dev)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
				do_soft_reset = 1;
				tries++;
				continue;
			}
		}

		if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		if (twa_empty_response_queue(tw_dev)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		flashed = 0;

		if (twa_check_srl(tw_dev, &flashed)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		} else {
			if (flashed) {
				tries++;
				continue;
			}
		}

		if (twa_aen_drain_queue(tw_dev, soft_reset)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		retval = 0;
		goto out;
	}
out:
	return retval;
}


static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
{
	int heads, sectors, cylinders;
	TW_Device_Extension *tw_dev;

	tw_dev = (TW_Device_Extension *)sdev->host->hostdata;

	if (capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		cylinders = sector_div(capacity, heads * sectors);
	} else {
		heads = 64;
		sectors = 32;
		cylinders = sector_div(capacity, heads * sectors);
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	return 0;
}

static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
{
	TW_Device_Extension *tw_dev = NULL;
	int retval = FAILED;

	tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;

	tw_dev->num_resets++;

	sdev_printk(KERN_WARNING, SCpnt->device,
		    "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
		    TW_DRIVER, 0x2c, SCpnt->cmnd[0]);

	mutex_lock(&tw_dev->ioctl_lock);

	if (twa_reset_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
		goto out;
	}

	retval = SUCCESS;
out:
	mutex_unlock(&tw_dev->ioctl_lock);
	return retval;
}


static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
	int request_id, retval;
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;

	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
		SCpnt->result = (DID_BAD_TARGET << 16);
		done(SCpnt);
		retval = 0;
		goto out;
	}

	SCpnt->scsi_done = done;

	twa_get_request_id(tw_dev, &request_id);

	tw_dev->srb[request_id] = SCpnt;

	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
	switch (retval) {
	case SCSI_MLQUEUE_HOST_BUSY:
		if (twa_command_mapped(SCpnt))
			scsi_dma_unmap(SCpnt);
		twa_free_request_id(tw_dev, request_id);
		break;
	case 1:
		SCpnt->result = (DID_ERROR << 16);
		if (twa_command_mapped(SCpnt))
			scsi_dma_unmap(SCpnt);
		done(SCpnt);
		tw_dev->state[request_id] = TW_S_COMPLETED;
		twa_free_request_id(tw_dev, request_id);
		retval = 0;
	}
out:
	return retval;
}

static DEF_SCSI_QCMD(twa_scsi_queue)

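/* This function builds an execute scsi command packet, sets up the scatter gather list and posts it to the controller */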
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
{
	TW_Command_Full *full_command_packet;
	TW_Command_Apache *command_packet;
	u32 num_sectors = 0x0;
	int i, sg_count;
	struct scsi_cmnd *srb = NULL;
	struct scatterlist *sglist = NULL, *sg;
	int retval = 1;

	if (tw_dev->srb[request_id]) {
		srb = tw_dev->srb[request_id];
		if (scsi_sglist(srb))
			sglist = scsi_sglist(srb);
	}

	/* Initialize command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	full_command_packet->header.header_desc.size_header = 128;
	full_command_packet->header.status_block.error = 0;
	full_command_packet->header.status_block.severity__reserved = 0;

	command_packet = &full_command_packet->command.newcommand;
	command_packet->status = 0;
	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);

	/* Copy the cdb from the scsi command, or use the one passed in */
	if (!cdb)
		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
	else
		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);

	if (srb) {
		command_packet->unit = srb->device->id;
		command_packet->request_id__lunl =
			cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
	} else {
		command_packet->request_id__lunl =
			cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
		command_packet->unit = 0;
	}

	command_packet->sgl_offset = 16;

	if (!sglistarg) {
		/* Map sglist from scsi layer to cmd packet */
		if (scsi_sg_count(srb)) {
			if (!twa_command_mapped(srb)) {
				if (srb->sc_data_direction == DMA_TO_DEVICE ||
				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
					scsi_sg_copy_to_buffer(srb,
							       tw_dev->generic_buffer_virt[request_id],
							       TW_SECTOR_SIZE);
				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
			} else {
				sg_count = scsi_dma_map(srb);
				if (sg_count < 0)
					goto out;

				scsi_for_each_sg(srb, sg, sg_count, i) {
					command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
					command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
					if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
						goto out;
					}
				}
			}
			command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
		}
	} else {
		/* Internal cdb post: use the caller supplied sglist */
		for (i = 0; i < use_sg; i++) {
			command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
			command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
			if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
				goto out;
			}
		}
		command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
	}

	if (srb) {
		if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
			num_sectors = (u32)srb->cmnd[4];

		if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
			num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
	}

	/* Update sector statistic */
	tw_dev->sector_count = num_sectors;
	if (tw_dev->sector_count > tw_dev->max_sector_count)
		tw_dev->max_sector_count = tw_dev->sector_count;

	/* Update SG statistics */
	if (srb) {
		tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
	}

	/* Now post the command to the board */
	if (srb) {
		retval = twa_post_command_packet(tw_dev, request_id, 0);
	} else {
		twa_post_command_packet(tw_dev, request_id, 1);
		retval = 0;
	}
out:
	return retval;
}

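/* This function completes an execute scsi operation */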
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
{
	struct scsi_cmnd *cmd = tw_dev->srb[request_id];

	if (!twa_command_mapped(cmd) &&
	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
		if (scsi_sg_count(cmd) == 1) {
			void *buf = tw_dev->generic_buffer_virt[request_id];

			scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
		}
	}
}

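/* This function tells the controller to shut down */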
static void __twa_shutdown(TW_Device_Extension *tw_dev)
{
	/* Disable interrupts */
	TW_DISABLE_INTERRUPTS(tw_dev);

	/* Free up the IRQ */
	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

	printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);

	/* Tell the card we are shutting down */
	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
	} else {
		printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
	}

	/* Clear all interrupts just before exit */
	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
}

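/* Wrapper for __twa_shutdown */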
static void twa_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	__twa_shutdown(tw_dev);
}

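/* This function will look up a string by code in a message table */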
static char *twa_string_lookup(twa_message_type *table, unsigned int code)
{
	int index;

	for (index = 0; ((code != table[index].code) &&
			 (table[index].text != (char *)0)); index++);
	return table[index].text;
}

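/* This function gets called when a disk is coming on-line */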
static int twa_slave_configure(struct scsi_device *sdev)
{
	/* Force 60 second timeout */
	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);

	return 0;
}

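/* scsi_host_template initializer */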
static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "3ware 9000 Storage Controller",
	.queuecommand		= twa_scsi_queue,
	.eh_host_reset_handler	= twa_scsi_eh_reset,
	.bios_param		= twa_scsi_biosparam,
	.change_queue_depth	= scsi_change_queue_depth,
	.can_queue		= TW_Q_LENGTH-2,
	.slave_configure	= twa_slave_configure,
	.this_id		= -1,
	.sg_tablesize		= TW_APACHE_MAX_SGL_LENGTH,
	.max_sectors		= TW_MAX_SECTORS,
	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= twa_host_attrs,
	.emulated		= 1,
	.no_write_same		= 1,
};

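/* This function will probe and initialize a card */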
static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host = NULL;
	TW_Device_Extension *tw_dev;
	unsigned long mem_addr, mem_len;
	int retval = -ENODEV;

	retval = pci_enable_device(pdev);
	if (retval) {
		TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
			retval = -ENODEV;
			goto out_disable_device;
		}

	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
	if (!host) {
		TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
		retval = -ENOMEM;
		goto out_disable_device;
	}
	tw_dev = (TW_Device_Extension *)host->hostdata;

	/* Save values to device extension */
	tw_dev->host = host;
	tw_dev->tw_pci_dev = pdev;

	if (twa_initialize_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
		goto out_free_device_extension;
	}

	/* Request IO regions */
	retval = pci_request_regions(pdev, "3w-9xxx");
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
		goto out_free_device_extension;
	}

	if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
		mem_addr = pci_resource_start(pdev, 1);
		mem_len = pci_resource_len(pdev, 1);
	} else {
		mem_addr = pci_resource_start(pdev, 2);
		mem_len = pci_resource_len(pdev, 2);
	}

	/* Save base address */
	tw_dev->base_addr = ioremap(mem_addr, mem_len);
	if (!tw_dev->base_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
		goto out_release_mem_region;
	}

	/* Disable interrupts on the card */
	TW_DISABLE_INTERRUPTS(tw_dev);

	/* Initialize the card */
	if (twa_reset_sequence(tw_dev, 0))
		goto out_iounmap;

	/* Set host specific parameters */
	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
	    (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
		host->max_id = TW_MAX_UNITS_9650SE;
	else
		host->max_id = TW_MAX_UNITS;

	host->max_cmd_len = TW_MAX_CDB_LEN;

	/* LUN count comes from the firmware SRL; only channel 0 is used */
	host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
	host->max_channel = 0;

	/* Register the card with the kernel SCSI layer */
	retval = scsi_add_host(host, &pdev->dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
		goto out_iounmap;
	}

	pci_set_drvdata(pdev, host);

	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
	       host->host_no, mem_addr, pdev->irq);
	printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
	       host->host_no,
	       (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
	       (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
				     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
	       le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
				     TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));

	/* Try to enable MSI */
	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
	    !pci_enable_msi(pdev))
		set_bit(TW_USING_MSI, &tw_dev->flags);

	/* Now setup the interrupt handler */
	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
		goto out_remove_host;
	}

	twa_device_extension_list[twa_device_extension_count] = tw_dev;
	twa_device_extension_count++;

	/* Re-enable interrupts on the card */
	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);

	/* Finally, scan the host */
	scsi_scan_host(host);

	if (twa_major == -1) {
		if ((twa_major = register_chrdev(0, "twa", &twa_fops)) < 0)
			TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
	}
	return 0;

out_remove_host:
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);
	scsi_remove_host(host);
out_iounmap:
	iounmap(tw_dev->base_addr);
out_release_mem_region:
	pci_release_regions(pdev);
out_free_device_extension:
	twa_free_device_extension(tw_dev);
	scsi_host_put(host);
out_disable_device:
	pci_disable_device(pdev);

	return retval;
}

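/* This function is called to remove the device */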
static void twa_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	scsi_remove_host(tw_dev->host);

	/* Unregister character device */
	if (twa_major >= 0) {
		unregister_chrdev(twa_major, "twa");
		twa_major = -1;
	}

	/* Shutdown the card */
	__twa_shutdown(tw_dev);

	/* Disable MSI if enabled */
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);

	/* Free IO remapping */
	iounmap(tw_dev->base_addr);

	/* Free up the mem region */
	pci_release_regions(pdev);

	/* Free up device extension resources */
	twa_free_device_extension(tw_dev);

	scsi_host_put(tw_dev->host);
	pci_disable_device(pdev);
	twa_device_extension_count--;
}

#ifdef CONFIG_PM

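/* This function is called on PCI suspend */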
static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);

	TW_DISABLE_INTERRUPTS(tw_dev);
	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);

	/* Tell the card we are shutting down */
	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
	} else {
		printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
	}
	TW_CLEAR_ALL_INTERRUPTS(tw_dev);

	pci_save_state(pdev);
	pci_disable_device(pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

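/* This function is called on PCI resume */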
static int twa_resume(struct pci_dev *pdev)
{
	int retval = 0;
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake(pdev, PCI_D0, 0);
	pci_restore_state(pdev);

	retval = pci_enable_device(pdev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
		return retval;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
			TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
			retval = -ENODEV;
			goto out_disable_device;
		}

	/* Initialize the card */
	if (twa_reset_sequence(tw_dev, 0)) {
		retval = -ENODEV;
		goto out_disable_device;
	}

	/* Now setup the interrupt handler */
	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
		retval = -ENODEV;
		goto out_disable_device;
	}

	/* Now enable MSI if enabled */
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_enable_msi(pdev);

	/* Re-enable interrupts on the card */
	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);

	printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
	return 0;

out_disable_device:
	scsi_remove_host(host);
	pci_disable_device(pdev);

	return retval;
}
#endif

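/* PCI Devices supported by this driver */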
static struct pci_device_id twa_pci_tbl[] = {
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ }
};
MODULE_DEVICE_TABLE(pci, twa_pci_tbl);

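/* pci_driver initializer */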
static struct pci_driver twa_driver = {
	.name		= "3w-9xxx",
	.id_table	= twa_pci_tbl,
	.probe		= twa_probe,
	.remove		= twa_remove,
#ifdef CONFIG_PM
	.suspend	= twa_suspend,
	.resume		= twa_resume,
#endif
	.shutdown	= twa_shutdown
};

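/* This function is called on driver initialization */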
static int __init twa_init(void)
{
	printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);

	return pci_register_driver(&twa_driver);
}

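/* This function is called on driver exit */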
static void __exit twa_exit(void)
{
	pci_unregister_driver(&twa_driver);
}

module_init(twa_init);
module_exit(twa_exit);