1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81#include <linux/module.h>
82#include <linux/reboot.h>
83#include <linux/spinlock.h>
84#include <linux/interrupt.h>
85#include <linux/moduleparam.h>
86#include <linux/errno.h>
87#include <linux/types.h>
88#include <linux/delay.h>
89#include <linux/pci.h>
90#include <linux/time.h>
91#include <linux/mutex.h>
92#include <linux/slab.h>
93#include <asm/io.h>
94#include <asm/irq.h>
95#include <linux/uaccess.h>
96#include <scsi/scsi.h>
97#include <scsi/scsi_host.h>
98#include <scsi/scsi_tcq.h>
99#include <scsi/scsi_cmnd.h>
100#include "3w-9xxx.h"
101
102
/* Driver-wide state and module metadata. */
#define TW_DRIVER_VERSION "2.26.02.014"
/* Serializes all character-device ioctls across every controller. */
static DEFINE_MUTEX(twa_chrdev_mutex);
/* Per-controller extensions, indexed by chrdev minor number. */
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
/* Character device major; -1 until registered. */
static int twa_major = -1;
extern struct timezone sys_tz;


MODULE_AUTHOR ("LSI");
MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);

/* Module parameter: enable MSI interrupts instead of legacy INTx. */
static int use_msi = 0;
module_param(use_msi, int, S_IRUGO);
MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
119
120
/* Function prototypes */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
static char *twa_aen_severity_lookup(unsigned char severity_code);
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static int twa_chrdev_open(struct inode *inode, struct file *file);
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result);
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
				   unsigned char *cdb, int use_sg,
				   TW_SG_Entry *sglistarg);
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
151
152
153
154
155static ssize_t twa_show_stats(struct device *dev,
156 struct device_attribute *attr, char *buf)
157{
158 struct Scsi_Host *host = class_to_shost(dev);
159 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
160 unsigned long flags = 0;
161 ssize_t len;
162
163 spin_lock_irqsave(tw_dev->host->host_lock, flags);
164 len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
165 "Current commands posted: %4d\n"
166 "Max commands posted: %4d\n"
167 "Current pending commands: %4d\n"
168 "Max pending commands: %4d\n"
169 "Last sgl length: %4d\n"
170 "Max sgl length: %4d\n"
171 "Last sector count: %4d\n"
172 "Max sector count: %4d\n"
173 "SCSI Host Resets: %4d\n"
174 "AEN's: %4d\n",
175 TW_DRIVER_VERSION,
176 tw_dev->posted_request_count,
177 tw_dev->max_posted_request_count,
178 tw_dev->pending_request_count,
179 tw_dev->max_pending_request_count,
180 tw_dev->sgl_entries,
181 tw_dev->max_sgl_entries,
182 tw_dev->sector_count,
183 tw_dev->max_sector_count,
184 tw_dev->num_resets,
185 tw_dev->aen_count);
186 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
187 return len;
188}
189
190
/* Read-only sysfs attribute backing the "stats" file (see twa_show_stats). */
static struct device_attribute twa_host_stats_attr = {
	.attr = {
		.name = "stats",
		.mode = S_IRUGO,
	},
	.show = twa_show_stats
};
198
199
/* NULL-terminated attribute list handed to the SCSI midlayer host template. */
static struct device_attribute *twa_host_attrs[] = {
	&twa_host_stats_attr,
	NULL,
};
204
205
/* File operations for the management character device (used by 3DM2/smartmontools). */
static const struct file_operations twa_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = twa_chrdev_ioctl,
	.open = twa_chrdev_open,
	.release = NULL,	/* no per-open state to tear down */
	.llseek = noop_llseek,
};
213
214
215
216
217
218
219static bool twa_command_mapped(struct scsi_cmnd *cmd)
220{
221 return scsi_sg_count(cmd) != 1 ||
222 scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
223}
224
225
/*
 * Complete an AEN (asynchronous event notification) request from the ISR.
 * Returns 0 on success, 1 on failure; on the failure path the request is
 * freed and the attention loop flag is cleared so the loop can restart.
 */
static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int retval = 1;

	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
	tw_dev->posted_request_count--;
	aen = le16_to_cpu(header->status_block.error);
	full_command_packet = tw_dev->command_packet_virt[request_id];
	command_packet = &full_command_packet->command.oldcommand;

	/* First check for internal completion of set param for time sync */
	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
		/* Keep reading the queue in case there are more aen's */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}

	switch (aen) {
	case TW_AEN_QUEUE_EMPTY:
		/* Nothing left to read; fall out to out2 to end the loop. */
		break;
	case TW_AEN_SYNC_TIME_WITH_HOST:
		/* Reuses this request_id for the SET_PARAM time-sync command,
		   so do NOT free it here. */
		twa_aen_sync_time(tw_dev, request_id);
		retval = 0;
		goto out;
	default:
		twa_aen_queue_event(tw_dev, header);

		/* If there are more aen's, keep reading the queue */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}
	retval = 0;
out2:
	tw_dev->state[request_id] = TW_S_COMPLETED;
	twa_free_request_id(tw_dev, request_id);
	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
out:
	return retval;
}
278
279
/*
 * Drain the controller's AEN queue by repeatedly posting REQUEST SENSE
 * until the firmware reports the queue empty (or TW_MAX_AEN_DRAIN reads).
 * @no_check_reset: nonzero to queue events immediately instead of waiting
 * for the expected soft-reset AEN first. Returns 0 on success, 1 on failure.
 */
static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
{
	int request_id = 0;
	unsigned char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	int finished = 0, count = 0;
	TW_Command_Full *full_command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int first_reset = 0, queue = 0, retval = 1;

	/* When checking for the post-reset AEN, first_reset stays 0 until
	   the TW_AEN_SOFT_RESET event is actually seen. */
	if (no_check_reset)
		first_reset = 0;
	else
		first_reset = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	/* Initialize cdb */
	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE;	/* opcode */
	cdb[4] = TW_ALLOCATION_LENGTH;	/* allocation length */

	/* Initialize sglist: one entry pointing at this id's DMA buffer. */
	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
	sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);

	/* The hardware requires aligned SG addresses; bail out if not. */
	if (tw_dev->generic_buffer_phys[request_id] & TW_ALIGNMENT_9000_SGL) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
		goto out;
	}

	/* Mark internal command (no scsi_cmnd attached). */
	tw_dev->srb[request_id] = NULL;

	do {
		/* Send command to the board */
		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
			goto out;
		}

		/* Now poll for completion */
		if (twa_poll_response(tw_dev, request_id, 30)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
			tw_dev->posted_request_count--;
			goto out;
		}

		tw_dev->posted_request_count--;
		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
		aen = le16_to_cpu(header->status_block.error);
		queue = 0;
		count++;

		switch (aen) {
		case TW_AEN_QUEUE_EMPTY:
			/* Only finish once the expected soft reset was seen. */
			if (first_reset != 1)
				goto out;
			else
				finished = 1;
			break;
		case TW_AEN_SOFT_RESET:
			if (first_reset == 0)
				first_reset = 1;	/* expected reset AEN; swallow it */
			else
				queue = 1;	/* an additional, unexpected reset */
			break;
		case TW_AEN_SYNC_TIME_WITH_HOST:
			/* Ignored during drain; handled in normal operation. */
			break;
		default:
			queue = 1;
		}

		/* Now queue an event info */
		if (queue)
			twa_aen_queue_event(tw_dev, header);
	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));

	/* Hitting the drain limit without an empty queue is a failure. */
	if (count == TW_MAX_AEN_DRAIN)
		goto out;

	retval = 0;
out:
	tw_dev->state[request_id] = TW_S_INITIAL;
	return retval;
}
369
370
371static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
372{
373 u32 local_time;
374 TW_Event *event;
375 unsigned short aen;
376 char host[16];
377 char *error_str;
378
379 tw_dev->aen_count++;
380
381
382 event = tw_dev->event_queue[tw_dev->error_index];
383
384
385 host[0] = '\0';
386 if (tw_dev->host) {
387 sprintf(host, " scsi%d:", tw_dev->host->host_no);
388 if (event->retrieved == TW_AEN_NOT_RETRIEVED)
389 tw_dev->aen_clobber = 1;
390 }
391
392 aen = le16_to_cpu(header->status_block.error);
393 memset(event, 0, sizeof(TW_Event));
394
395 event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
396
397 local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
398 event->time_stamp_sec = local_time;
399 event->aen_code = aen;
400 event->retrieved = TW_AEN_NOT_RETRIEVED;
401 event->sequence_id = tw_dev->error_sequence_id;
402 tw_dev->error_sequence_id++;
403
404
405 error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
406
407 header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
408 event->parameter_len = strlen(header->err_specific_desc);
409 memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
410 if (event->severity != TW_AEN_SEVERITY_DEBUG)
411 printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
412 host,
413 twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
414 TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
415 error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
416 header->err_specific_desc);
417 else
418 tw_dev->aen_count--;
419
420 if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
421 tw_dev->event_queue_wrapped = 1;
422 tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
423}
424
425
426static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
427{
428 unsigned char cdb[TW_MAX_CDB_LEN];
429 TW_SG_Entry sglist[1];
430 TW_Command_Full *full_command_packet;
431 int retval = 1;
432
433 full_command_packet = tw_dev->command_packet_virt[request_id];
434 memset(full_command_packet, 0, sizeof(TW_Command_Full));
435
436
437 memset(&cdb, 0, TW_MAX_CDB_LEN);
438 cdb[0] = REQUEST_SENSE;
439 cdb[4] = TW_ALLOCATION_LENGTH;
440
441
442 memset(&sglist, 0, sizeof(TW_SG_Entry));
443 sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
444 sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
445
446
447 tw_dev->srb[request_id] = NULL;
448
449
450 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
451 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
452 goto out;
453 }
454 retval = 0;
455out:
456 return retval;
457}
458
459
460static char *twa_aen_severity_lookup(unsigned char severity_code)
461{
462 char *retval = NULL;
463
464 if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
465 (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
466 goto out;
467
468 retval = twa_aen_severity_table[severity_code];
469out:
470 return retval;
471}
472
473
/*
 * Respond to a TW_AEN_SYNC_TIME_WITH_HOST event: post a SET_PARAM command
 * that writes the host's local time into the controller's timekeeping
 * table. Completion is handled by twa_aen_complete() (SET_PARAM branch).
 */
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
	u32 schedulertime;
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	time64_t local_time;

	/* Fill out the command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;
	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
	command_packet->request_id = request_id;
	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);

	/* Setup the param: timekeeping table, parameter 0x3, 4 bytes. */
	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000);
	param->parameter_id = cpu_to_le16(0x3);
	param->parameter_size_bytes = cpu_to_le16(4);

	/* Convert UTC to local seconds, then to seconds since the most
	   recent Sunday 00:00 (epoch was a Thursday: shift by 3 days,
	   modulo one week = 604800 s). */
	local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
	div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);

	/* Store little-endian value via a compound literal to avoid aliasing. */
	memcpy(param->data, &(__le32){cpu_to_le32(schedulertime)}, sizeof(__le32));

	/* Mark internal command (no scsi_cmnd attached). */
	tw_dev->srb[request_id] = NULL;

	/* Now post the command */
	twa_post_command_packet(tw_dev, request_id, 1);
}
513
514
/*
 * Allocate one coherent DMA region holding TW_Q_LENGTH fixed-size slots
 * and record per-slot virtual/physical addresses.
 * @which: 0 = command packet array, 1 = generic buffer array.
 * Returns 0 on success, 1 on failure.
 */
static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
{
	int i;
	dma_addr_t dma_handle;
	unsigned long *cpu_addr;
	int retval = 1;

	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
				      size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
	if (!cpu_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
		goto out;
	}

	/* The hardware needs TW_ALIGNMENT_9000-aligned packets; reject and
	   free the region if the allocator returned a misaligned block. */
	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
		dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
				  cpu_addr, dma_handle);
		goto out;
	}

	/* NOTE(review): dma_alloc_coherent already returns zeroed memory on
	   modern kernels, so this memset is likely redundant -- confirm the
	   minimum supported kernel before removing. */
	memset(cpu_addr, 0, size*TW_Q_LENGTH);

	/* Carve the region into TW_Q_LENGTH per-request slots. */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		switch(which) {
		case 0:
			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
			break;
		case 1:
			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
			break;
		}
	}
	retval = 0;
out:
	return retval;
}
554
555
556static int twa_check_bits(u32 status_reg_value)
557{
558 int retval = 1;
559
560 if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
561 goto out;
562 if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
563 goto out;
564
565 retval = 0;
566out:
567 return retval;
568}
569
570
/*
 * Negotiate a compatible firmware SRL (spec revision level) with the
 * controller: try the current driver level first, then fall back to the
 * base level, and record the working values plus compatibility info for
 * the TW_IOCTL_GET_COMPATIBILITY_INFO ioctl.
 * Returns 0 on success, 1 on failure.
 */
static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
{
	int retval = 1;
	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
	u32 init_connect_result = 0;

	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
			       &fw_on_ctlr_build, &init_connect_result)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
		goto out;
	}

	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;

	/* Try base mode compatibility if the firmware rejected current mode. */
	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
				       TW_EXTENDED_INIT_CONNECT,
				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
				       &init_connect_result)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
			goto out;
		}
		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
			/* Tell the user which side is too old. */
			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
			} else {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
			}
			goto out;
		}
		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
	}

	/* Load rest of compatibility struct for management tools. */
	strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
		sizeof(tw_dev->tw_compat_info.driver_version));
	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;

	retval = 0;
out:
	return retval;
}
634
635
/*
 * ioctl handler for the management character device (used by 3DM2 and
 * smartmontools). Handles firmware pass-through, event retrieval from the
 * circular AEN queue, compatibility info, and a cooperative userspace lock.
 * All ioctls are serialized globally (twa_chrdev_mutex) and per-controller
 * (tw_dev->ioctl_lock). Returns 0 or a TW_IOCTL_ERROR_OS_* negative errno.
 */
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(file);
	long timeout;
	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
	dma_addr_t dma_handle;
	int request_id = 0;
	unsigned int sequence_id = 0;
	unsigned char event_index, start_index;
	TW_Ioctl_Driver_Command driver_command;
	TW_Ioctl_Buf_Apache *tw_ioctl;
	TW_Lock *tw_lock;
	TW_Command_Full *full_command_packet;
	TW_Compatibility_Info *tw_compat_info;
	TW_Event *event;
	ktime_t current_time;
	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
	int retval = TW_IOCTL_ERROR_OS_EFAULT;
	void __user *argp = (void __user *)arg;

	mutex_lock(&twa_chrdev_mutex);

	/* Only one ioctl at a time per controller. */
	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
		retval = TW_IOCTL_ERROR_OS_EINTR;
		goto out;
	}

	/* First copy down the driver command header. */
	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
		goto out2;

	/* Reject oversized user buffers before allocating. */
	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
		retval = TW_IOCTL_ERROR_OS_EINVAL;
		goto out2;
	}

	/* Hardware can only do multiples of 512-byte transfers. */
	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;

	/* Allocate a coherent buffer for the ioctl header + data. */
	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
				      sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
				      &dma_handle, GFP_KERNEL);
	if (!cpu_addr) {
		retval = TW_IOCTL_ERROR_OS_ENOMEM;
		goto out2;
	}

	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;

	/* Now copy down the entire ioctl (header + data buffer). */
	if (copy_from_user(tw_ioctl, argp, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length))
		goto out3;

	/* Dispatch on the ioctl command. */
	switch (cmd) {
	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		twa_get_request_id(tw_dev, &request_id);

		/* Flag internal command (no scsi_cmnd attached). */
		tw_dev->srb[request_id] = NULL;

		/* Record which request belongs to this chrdev ioctl so the
		   ISR can wake us when it completes. */
		tw_dev->chrdev_request_id = request_id;

		full_command_packet = &tw_ioctl->firmware_command;

		/* Load request id and sglist for both command types. */
		twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);

		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));

		/* Now post the command packet to the controller. */
		twa_post_command_packet(tw_dev, request_id, 1);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;

		/* Wait for the ISR to mark the chrdev request free. */
		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);

		/* We timed out and never got the completion interrupt:
		   the board is wedged, so reset it. */
		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
			/* Now we need to reset the board */
			printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
			       tw_dev->host->host_no, TW_DRIVER, 0x37,
			       cmd);
			retval = TW_IOCTL_ERROR_OS_EIO;
			twa_reset_device_extension(tw_dev);
			goto out3;
		}

		/* Copy the command packet response back for userspace. */
		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));

		/* Now complete the io. */
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		tw_dev->posted_request_count--;
		tw_dev->state[request_id] = TW_S_COMPLETED;
		twa_free_request_id(tw_dev, request_id);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
		break;
	case TW_IOCTL_GET_COMPATIBILITY_INFO:
		tw_ioctl->driver_command.status = 0;
		/* Copy compatibility struct into ioctl data buffer. */
		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
		break;
	case TW_IOCTL_GET_LAST_EVENT:
		/* Report AEN clobber (events lost before retrieval) once. */
		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			} else
				tw_ioctl->driver_command.status = 0;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			tw_ioctl->driver_command.status = 0;
		}
		/* Newest event sits one behind the write index. */
		event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_FIRST_EVENT:
		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			} else
				tw_ioctl->driver_command.status = 0;
			/* After wrap, the oldest event is at the write index. */
			event_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			tw_ioctl->driver_command.status = 0;
			event_index = 0;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_NEXT_EVENT:
		/* Userspace passes the previous event's sequence_id. */
		event = (TW_Event *)tw_ioctl->data_buffer;
		sequence_id = event->sequence_id;
		tw_ioctl->driver_command.status = 0;

		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			}
			start_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			start_index = 0;
		}
		/* Translate the sequence id into a queue slot. */
		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;

		/* If the slot's sequence id is not newer, there is nothing
		   after the caller's position; restore clobber if we had
		   prematurely consumed it. */
		if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
				tw_dev->aen_clobber = 1;
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
			break;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_PREVIOUS_EVENT:
		/* Mirror of GET_NEXT_EVENT, walking backwards. */
		event = (TW_Event *)tw_ioctl->data_buffer;
		sequence_id = event->sequence_id;
		tw_ioctl->driver_command.status = 0;

		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			}
			start_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			start_index = 0;
		}
		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;

		if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
				tw_dev->aen_clobber = 1;
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
			break;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_LOCK:
		/* Cooperative, timed userspace lock: grant on force, when
		   free, or when the previous holder's timeout expired. */
		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
		current_time = ktime_get();

		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
		    ktime_after(current_time, tw_dev->ioctl_time)) {
			tw_dev->ioctl_sem_lock = 1;
			tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
			tw_ioctl->driver_command.status = 0;
			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
		} else {
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
			tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
		}
		break;
	case TW_IOCTL_RELEASE_LOCK:
		if (tw_dev->ioctl_sem_lock == 1) {
			tw_dev->ioctl_sem_lock = 0;
			tw_ioctl->driver_command.status = 0;
		} else {
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
		}
		break;
	default:
		retval = TW_IOCTL_ERROR_OS_ENOTTY;
		goto out3;
	}

	/* Now copy the entire response to userspace. */
	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length) == 0)
		retval = 0;
out3:
	/* Now free ioctl buf memory. */
	dma_free_coherent(&tw_dev->tw_pci_dev->dev,
			  sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
			  cpu_addr, dma_handle);
out2:
	mutex_unlock(&tw_dev->ioctl_lock);
out:
	mutex_unlock(&twa_chrdev_mutex);
	return retval;
}
884
885
886
887static int twa_chrdev_open(struct inode *inode, struct file *file)
888{
889 unsigned int minor_number;
890 int retval = TW_IOCTL_ERROR_OS_ENODEV;
891
892 if (!capable(CAP_SYS_ADMIN)) {
893 retval = -EACCES;
894 goto out;
895 }
896
897 minor_number = iminor(inode);
898 if (minor_number >= twa_device_extension_count)
899 goto out;
900 retval = 0;
901out:
902 return retval;
903}
904
905
/*
 * Decode error bits in a status register value, clearing recoverable
 * conditions via the control register as we go.
 * Returns 0 when the controller is usable, 1 on an unrecoverable
 * microcontroller error.
 */
static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
{
	int retval = 1;

	/* PCI parity error: recoverable, just clear it. */
	if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
		writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
	}

	/* PCI abort: clear in both the controller and PCI config space. */
	if (status_reg_value & TW_STATUS_PCI_ABORT) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
		writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
		pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
	}

	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
		/* 9650SE/9690SA report a queue error during reset; suppress
		   the message in that expected case, but always clear it. */
		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
	}

	/* Microcontroller error is fatal; print only once per reset cycle. */
	if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
		if (tw_dev->reset_print == 0) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
			tw_dev->reset_print = 1;
		}
		goto out;
	}
	retval = 0;
out:
	return retval;
}
941
942
943static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
944{
945 u32 status_reg_value;
946 int count = 0, retval = 1;
947
948 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
949
950 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
951 readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
952 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
953 count++;
954 }
955 if (count == TW_MAX_RESPONSE_DRAIN)
956 goto out;
957
958 retval = 0;
959out:
960 return retval;
961}
962
963
/*
 * Drain the large response queue on 9550SX and later controllers
 * (9000-series boards have no large queue and need nothing here).
 * Polls up to 30 seconds for the drain-completed signature, then waits
 * an additional settle time. Returns 0 on success, 1 on timeout.
 */
static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
{
	u32 response_que_value = 0;
	unsigned long before;
	int retval = 1;

	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
		before = jiffies;
		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
			msleep(1);
			if (time_after(jiffies, before + HZ * 30))
				goto out;
		}
		/* P-chip settle time after drain completes. */
		msleep(500);
		retval = 0;
	} else
		retval = 0;
out:
	return retval;
}
986
987
/*
 * Handle a command error: log the firmware's error string and, when
 * requested, copy sense data into the scsi_cmnd. Returns TW_ISR_DONT_RESULT
 * when sense was copied (caller must not overwrite cmd->result), 0 otherwise.
 */
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
{
	TW_Command_Full *full_command_packet;
	unsigned short error;
	int retval = 1;
	char *error_str;

	full_command_packet = tw_dev->command_packet_virt[request_id];

	/* Check for embedded error string: a second string follows the NUL
	   of the first in err_specific_desc.
	   NOTE(review): strlen() here assumes the firmware terminated the
	   descriptor within the buffer -- confirm, as no terminator is
	   forced beforehand on this path. */
	error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);

	/* Don't print error for Logical unit not supported during rollcall */
	error = le16_to_cpu(full_command_packet->header.status_block.error);
	if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
		if (print_host)
			printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
			       tw_dev->host->host_no,
			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
			       error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
			       full_command_packet->header.err_specific_desc);
		else
			printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
			       error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
			       full_command_packet->header.err_specific_desc);
	}

	if (copy_sense) {
		memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
		/* Firmware status shifted into the SCSI status byte position. */
		tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
		retval = TW_ISR_DONT_RESULT;
		goto out;
	}
	retval = 0;
out:
	return retval;
}
1026
1027
/*
 * Free the per-controller resources allocated by twa_allocate_memory()
 * and the event queue. Each DMA region was allocated as one contiguous
 * block, so freeing slot 0 releases all TW_Q_LENGTH slots.
 */
static void twa_free_device_extension(TW_Device_Extension *tw_dev)
{
	if (tw_dev->command_packet_virt[0])
		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
				  sizeof(TW_Command_Full) * TW_Q_LENGTH,
				  tw_dev->command_packet_virt[0],
				  tw_dev->command_packet_phys[0]);

	if (tw_dev->generic_buffer_virt[0])
		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
				  TW_SECTOR_SIZE * TW_Q_LENGTH,
				  tw_dev->generic_buffer_virt[0],
				  tw_dev->generic_buffer_phys[0]);

	/* Event queue was one kmalloc carved into TW_Q_LENGTH entries. */
	kfree(tw_dev->event_queue[0]);
}
1044
1045
1046static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1047{
1048 tw_dev->free_queue[tw_dev->free_tail] = request_id;
1049 tw_dev->state[request_id] = TW_S_FINISHED;
1050 tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1051}
1052
1053
/*
 * Synchronously read one firmware parameter via a GET_PARAM command.
 * Returns a pointer into the request's generic DMA buffer holding the
 * parameter data, or NULL on timeout. The buffer is only valid until the
 * request id is reused.
 */
static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	void *retval = NULL;

	/* Setup the command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;

	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->request_id = request_id;
	command_packet->byte6_offset.block_count = cpu_to_le16(1);

	/* Now setup the param: high bit of table_id requests firmware table. */
	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(table_id | 0x8000);
	param->parameter_id = cpu_to_le16(parameter_id);
	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);

	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);

	/* Post the command packet to the board */
	twa_post_command_packet(tw_dev, request_id, 1);

	/* Poll for completion (TW_PRINTK expands to a statement, hence no
	   semicolon before the else). */
	if (twa_poll_response(tw_dev, request_id, 30))
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
	else
		retval = (void *)&(param->data[0]);

	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
}
1095
1096
1097static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1098{
1099 *request_id = tw_dev->free_queue[tw_dev->free_head];
1100 tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1101 tw_dev->state[*request_id] = TW_S_STARTED;
1102}
1103
1104
/*
 * Send an INIT_CONNECTION command to the controller (always on request
 * id 0).  Negotiates message credits, and when TW_EXTENDED_INIT_CONNECT
 * is set in set_features also exchanges firmware compatibility data:
 * the driver's SRL/arch-id/branch/build values are sent and the
 * controller's values are returned through the fw_on_ctlr_* pointers
 * together with the init-connect result word.
 * Polls up to 30 seconds for completion.  Returns 0 on success, 1 on
 * failure; sleeps, so must not be called from interrupt context.
 */
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			u32 set_features, unsigned short current_fw_srl,
			unsigned short current_fw_arch_id,
			unsigned short current_fw_branch,
			unsigned short current_fw_build,
			unsigned short *fw_on_ctlr_srl,
			unsigned short *fw_on_ctlr_arch_id,
			unsigned short *fw_on_ctlr_branch,
			unsigned short *fw_on_ctlr_build,
			u32 *init_connect_result)
{
	TW_Command_Full *full_command_packet;
	TW_Initconnect *tw_initconnect;
	int request_id = 0, retval = 1;

	/* Initialize InitConnection command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	full_command_packet->header.header_desc.size_header = 128;

	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
	tw_initconnect->request_id = request_id;
	tw_initconnect->message_credits = cpu_to_le16(message_credits);

	/* Turn on 64-bit sgl support if we need to.
	   NOTE(review): hardcoded bit 0 — presumably the firmware's 64-bit
	   SGL feature flag; confirm against the feature definitions */
	set_features |= sizeof(dma_addr_t) > 4 ? 1 : 0;

	tw_initconnect->features = cpu_to_le32(set_features);

	if (set_features & TW_EXTENDED_INIT_CONNECT) {
		/* Extended packet: include driver firmware compatibility info */
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
	} else
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;

	/* Send command packet to the board (internal request) */
	twa_post_command_packet(tw_dev, request_id, 1);

	/* Poll for completion */
	if (twa_poll_response(tw_dev, request_id, 30)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
	} else {
		if (set_features & TW_EXTENDED_INIT_CONNECT) {
			/* The firmware writes its values back into the same packet */
			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
			*init_connect_result = le32_to_cpu(tw_initconnect->result);
		}
		retval = 0;
	}

	/* Undo the posting accounting and recycle request id 0 */
	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
}
1166
1167
/*
 * Allocate and initialize the per-controller state: coherent DMA areas
 * for command packets and generic buffers, the AEN event queue, and the
 * free/pending request queues.  Returns 0 on success, 1 on failure
 * (partial allocations are released by twa_free_device_extension() on
 * the caller's error path).
 */
static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
{
	int i, retval = 1;

	/* Initialize command packet buffers */
	if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
		goto out;
	}

	/* Initialize generic buffer (one sector per request slot) */
	if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
		goto out;
	}

	/* Allocate event info space as a single zeroed array */
	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
	if (!tw_dev->event_queue[0]) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
		goto out;
	}

	/* Point the per-slot event pointers into the array, and set up the
	   free queue so every request id starts out available */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
		tw_dev->free_queue[i] = i;
		tw_dev->state[i] = TW_S_INITIAL;
	}

	tw_dev->pending_head = TW_Q_START;
	tw_dev->pending_tail = TW_Q_START;
	tw_dev->free_head = TW_Q_START;
	tw_dev->free_tail = TW_Q_START;
	tw_dev->error_sequence_id = 1;
	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;

	mutex_init(&tw_dev->ioctl_lock);
	init_waitqueue_head(&tw_dev->ioctl_wqueue);

	retval = 0;
out:
	return retval;
}
1212
1213
/*
 * Shared interrupt handler.  Runs under the SCSI host lock and services,
 * in order: controller error bits, host interrupts, attention interrupts
 * (an AEN is waiting), command interrupts (drain the driver's pending
 * queue to the board) and response interrupts (complete finished
 * requests back to the midlayer, the ioctl waiter, or the AEN machinery).
 */
static irqreturn_t twa_interrupt(int irq, void *dev_instance)
{
	int request_id, error = 0;
	u32 status_reg_value;
	TW_Response_Queue response_que;
	TW_Command_Full *full_command_packet;
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
	int handled = 0;

	/* Get the per host lock for io completions */
	spin_lock(tw_dev->host->host_lock);

	/* Read the registers */
	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	/* Check if this is our interrupt, otherwise bail */
	if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
		goto twa_interrupt_bail;

	handled = 1;

	/* If we are resetting, bail */
	if (test_bit(TW_IN_RESET, &tw_dev->flags))
		goto twa_interrupt_bail;

	/* Check controller for errors */
	if (twa_check_bits(status_reg_value)) {
		if (twa_decode_bits(tw_dev, status_reg_value)) {
			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
			goto twa_interrupt_bail;
		}
	}

	/* Handle host interrupt */
	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
		TW_CLEAR_HOST_INTERRUPT(tw_dev);

	/* Handle attention interrupt: an AEN is waiting in the firmware queue */
	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
			twa_get_request_id(tw_dev, &request_id);

			error = twa_aen_read_queue(tw_dev, request_id);
			if (error) {
				/* Couldn't start the AEN read; release the
				   request slot and leave the attention loop */
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
			}
		}
	}

	/* Handle command interrupt: board has room again, flush pending queue */
	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
		TW_MASK_COMMAND_INTERRUPT(tw_dev);
		/* Drain as many pending requests as the board will accept */
		while (tw_dev->pending_request_count > 0) {
			request_id = tw_dev->pending_queue[tw_dev->pending_head];
			if (tw_dev->state[request_id] != TW_S_PENDING) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
				goto twa_interrupt_bail;
			}
			if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
				tw_dev->pending_request_count--;
			} else {
				/* If we get here, we will continue re-posting on the next command interrupt */
				break;
			}
		}
	}

	/* Handle response interrupt */
	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {

		/* Drain the response queue from the board */
		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
			/* Complete the response */
			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
			request_id = TW_RESID_OUT(response_que.response_id);
			full_command_packet = tw_dev->command_packet_virt[request_id];
			error = 0;
			/* Check for command packet errors */
			if (full_command_packet->command.newcommand.status != 0) {
				if (tw_dev->srb[request_id] != NULL) {
					error = twa_fill_sense(tw_dev, request_id, 1, 1);
				} else {
					/* Skip ioctl error prints */
					if (request_id != tw_dev->chrdev_request_id) {
						error = twa_fill_sense(tw_dev, request_id, 0, 1);
					}
				}
			}

			/* Check for correct state */
			if (tw_dev->state[request_id] != TW_S_POSTED) {
				if (tw_dev->srb[request_id] != NULL) {
					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}

			/* Check for internal command completion */
			if (tw_dev->srb[request_id] == NULL) {
				if (request_id != tw_dev->chrdev_request_id) {
					if (twa_aen_complete(tw_dev, request_id))
						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
				} else {
					/* Ioctl command finished: wake its waiter */
					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
					wake_up(&tw_dev->ioctl_wqueue);
				}
			} else {
				struct scsi_cmnd *cmd;

				cmd = tw_dev->srb[request_id];

				twa_scsiop_execute_scsi_complete(tw_dev, request_id);
				/* If no error command was a success */
				if (error == 0) {
					cmd->result = (DID_OK << 16);
				}

				/* If error, command failed */
				if (error == 1) {
					/* Ask for a host reset */
					cmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
				}

				/* Report residual bytes for single sgl */
				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
					u32 length = le32_to_cpu(full_command_packet->command.newcommand.sg_list[0].length);

					if (length < scsi_bufflen(cmd))
						scsi_set_resid(cmd, scsi_bufflen(cmd) - length);
				}

				/* Now complete the io back to the midlayer */
				if (twa_command_mapped(cmd))
					scsi_dma_unmap(cmd);
				cmd->scsi_done(cmd);
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				tw_dev->posted_request_count--;
			}

			/* Check for valid status after each drain */
			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
			if (twa_check_bits(status_reg_value)) {
				if (twa_decode_bits(tw_dev, status_reg_value)) {
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}
		}
	}

twa_interrupt_bail:
	spin_unlock(tw_dev->host->host_lock);
	return IRQ_RETVAL(handled);
}
1376
1377
/*
 * Load a single scatter-gather entry (pointing at the data area of an
 * ioctl buffer at dma_handle) into either a new-style (EXECUTE_SCSI)
 * or old-style command packet, and patch the request id into the packet.
 * NOTE(review): 'pae' is set on 32-bit kernels with a 64-bit dma_addr_t;
 * the size/offset adjustments below presumably account for the wider
 * address in the SGL — confirm against the firmware packet layout.
 */
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
{
	TW_Command *oldcommand;
	TW_Command_Apache *newcommand;
	TW_SG_Entry *sgl;
	unsigned int pae = 0;

	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
		pae = 1;

	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
		/* New-style command: sgl lives at a fixed place in the packet */
		newcommand = &full_command_packet->command.newcommand;
		newcommand->request_id__lunl =
			TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id);
		if (length) {
			/* Data starts just past the ioctl header in the buffer */
			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
			newcommand->sg_list[0].length = cpu_to_le32(length);
		}
		newcommand->sgl_entries__lunh =
			TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0);
	} else {
		oldcommand = &full_command_packet->command.oldcommand;
		oldcommand->request_id = request_id;

		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
			/* Load the sgl at the offset encoded in the opcode;
			   the 9690SA instead places it at the end of the packet */
			if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
				sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
			else
				sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
			sgl->length = cpu_to_le32(length);

			/* Grow the packet size by one dword when addresses are wide */
			oldcommand->size += pae;
		}
	}
}
1415
1416
/*
 * Poll (up to 'seconds') for the response to an internally posted
 * request.  Verifies the response carries the expected request id and
 * that the command completed without a firmware status error (filling
 * sense data quietly if it did not).
 * Returns 0 on success, 1 on timeout, id mismatch or command error.
 * Sleeps via twa_poll_status_gone(); not for interrupt context.
 */
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
{
	int retval = 1, found = 0, response_request_id;
	TW_Response_Queue response_queue;
	TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];

	/* Wait for the response queue to become non-empty */
	if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
		response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
		response_request_id = TW_RESID_OUT(response_queue.response_id);
		if (request_id != response_request_id) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
			goto out;
		}
		/* New- and old-style packets keep their status in different spots */
		if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
			if (full_command_packet->command.newcommand.status != 0) {
				/* bad response: record sense data, but don't print */
				twa_fill_sense(tw_dev, request_id, 0, 0);
				goto out;
			}
			found = 1;
		} else {
			if (full_command_packet->command.oldcommand.status != 0) {
				/* bad response: record sense data, but don't print */
				twa_fill_sense(tw_dev, request_id, 0, 0);
				goto out;
			}
			found = 1;
		}
	}

	if (found)
		retval = 0;
out:
	return retval;
}
1452
1453
1454static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1455{
1456 u32 status_reg_value;
1457 unsigned long before;
1458 int retval = 1;
1459
1460 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1461 before = jiffies;
1462
1463 if (twa_check_bits(status_reg_value))
1464 twa_decode_bits(tw_dev, status_reg_value);
1465
1466 while ((status_reg_value & flag) != flag) {
1467 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1468
1469 if (twa_check_bits(status_reg_value))
1470 twa_decode_bits(tw_dev, status_reg_value);
1471
1472 if (time_after(jiffies, before + HZ * seconds))
1473 goto out;
1474
1475 msleep(50);
1476 }
1477 retval = 0;
1478out:
1479 return retval;
1480}
1481
1482
1483static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1484{
1485 u32 status_reg_value;
1486 unsigned long before;
1487 int retval = 1;
1488
1489 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1490 before = jiffies;
1491
1492 if (twa_check_bits(status_reg_value))
1493 twa_decode_bits(tw_dev, status_reg_value);
1494
1495 while ((status_reg_value & flag) != 0) {
1496 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1497 if (twa_check_bits(status_reg_value))
1498 twa_decode_bits(tw_dev, status_reg_value);
1499
1500 if (time_after(jiffies, before + HZ * seconds))
1501 goto out;
1502
1503 msleep(50);
1504 }
1505 retval = 0;
1506out:
1507 return retval;
1508}
1509
1510
/*
 * Post the command packet for 'request_id' to the board.  If the board
 * command queue is full (or other requests are already pending):
 * internal requests are parked on the driver's pending queue to be
 * re-posted from the command interrupt; external (midlayer) requests
 * get SCSI_MLQUEUE_HOST_BUSY so the midlayer retries them.
 * Returns 0 on successful post, SCSI_MLQUEUE_HOST_BUSY, or 0 with the
 * request left pending (internal).
 */
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
{
	u32 status_reg_value;
	dma_addr_t command_que_value;
	int retval = 1;

	command_que_value = tw_dev->command_packet_phys[request_id];

	/* For 9650SE/9690SA write the low dword of the address first;
	   the post completes when the high dword is written below */
	if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
	    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
		command_que_value += TW_COMMAND_OFFSET;
		writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
	}

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	if (twa_check_bits(status_reg_value))
		twa_decode_bits(tw_dev, status_reg_value);

	if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {

		/* Only pend internal driver commands */
		if (!internal) {
			retval = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}

		/* Couldn't post the command packet, so we do it later */
		if (tw_dev->state[request_id] != TW_S_PENDING) {
			tw_dev->state[request_id] = TW_S_PENDING;
			tw_dev->pending_request_count++;
			if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
				tw_dev->max_pending_request_count = tw_dev->pending_request_count;
			}
			tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
			tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
		}
		/* Let the command interrupt fire when there is room again */
		TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
		goto out;
	} else {
		if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
		    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
			/* Now write upper 4 bytes; this is what triggers the post */
			writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
		} else {
			if (sizeof(dma_addr_t) > 4) {
				/* 64-bit DMA address: write low then high dword */
				command_que_value += TW_COMMAND_OFFSET;
				writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
				writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
			} else {
				/* 32-bit DMA address: single write */
				writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
			}
		}
		tw_dev->state[request_id] = TW_S_POSTED;
		tw_dev->posted_request_count++;
		if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
			tw_dev->max_posted_request_count = tw_dev->posted_request_count;
		}
	}
	retval = 0;
out:
	return retval;
}
1575
1576
/*
 * Full controller reset: fail every outstanding command back to the
 * midlayer with DID_RESET, reinitialize all request queues and
 * counters, run the hardware reset sequence and re-enable interrupts.
 * Returns 0 on success; 1 if the reset sequence fails, in which case
 * interrupts are left disabled and TW_IN_RESET stays set.
 */
static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
{
	int i = 0;
	int retval = 1;
	unsigned long flags = 0;

	set_bit(TW_IN_RESET, &tw_dev->flags);
	TW_DISABLE_INTERRUPTS(tw_dev);
	TW_MASK_COMMAND_INTERRUPT(tw_dev);
	spin_lock_irqsave(tw_dev->host->host_lock, flags);

	/* Abort all requests that are in progress */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		if ((tw_dev->state[i] != TW_S_FINISHED) &&
		    (tw_dev->state[i] != TW_S_INITIAL) &&
		    (tw_dev->state[i] != TW_S_COMPLETED)) {
			if (tw_dev->srb[i]) {
				struct scsi_cmnd *cmd = tw_dev->srb[i];

				cmd->result = (DID_RESET << 16);
				if (twa_command_mapped(cmd))
					scsi_dma_unmap(cmd);
				cmd->scsi_done(cmd);
			}
		}
	}

	/* Reset queues and counters */
	for (i = 0; i < TW_Q_LENGTH; i++) {
		tw_dev->free_queue[i] = i;
		tw_dev->state[i] = TW_S_INITIAL;
	}
	tw_dev->free_head = TW_Q_START;
	tw_dev->free_tail = TW_Q_START;
	tw_dev->posted_request_count = 0;
	tw_dev->pending_request_count = 0;
	tw_dev->pending_head = TW_Q_START;
	tw_dev->pending_tail = TW_Q_START;
	tw_dev->reset_print = 0;

	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

	if (twa_reset_sequence(tw_dev, 1))
		goto out;

	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
	clear_bit(TW_IN_RESET, &tw_dev->flags);
	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;

	retval = 0;
out:
	return retval;
}
1630
1631
/*
 * Bring the controller to a ready state, retrying up to
 * TW_MAX_RESET_TRIES times.  Each attempt: optionally soft-reset the
 * board, wait for the microcontroller to report ready, empty the
 * response queue, check firmware compatibility (a pass is retried if
 * twa_check_srl() reports it flashed new firmware), and drain any
 * queued AENs.  Returns 0 on success, 1 when all attempts fail.
 */
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
{
	int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;

	while (tries < TW_MAX_RESET_TRIES) {
		if (do_soft_reset) {
			TW_SOFT_RESET(tw_dev);
			/* Clear pchip/response queue on 9550SX-class hardware */
			if (twa_empty_response_queue_large(tw_dev)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
				do_soft_reset = 1;
				tries++;
				continue;
			}
		}

		/* Make sure controller is in a good state; a soft reset also
		   raises an attention interrupt we must wait out */
		if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* Empty response queue */
		if (twa_empty_response_queue(tw_dev)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		flashed = 0;

		/* Check for compatibility/flash */
		if (twa_check_srl(tw_dev, &flashed)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		} else {
			if (flashed) {
				/* Firmware was flashed; run another pass */
				tries++;
				continue;
			}
		}

		/* Drain the AEN queue */
		if (twa_aen_drain_queue(tw_dev, soft_reset)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
			do_soft_reset = 1;
			tries++;
			continue;
		}

		/* If we got here, controller is in a good state */
		retval = 0;
		goto out;
	}
out:
	return retval;
}
1694
1695
/*
 * Report a BIOS-compatible disk geometry (heads/sectors/cylinders) for
 * the given capacity: 255/63 for disks >= 1GiB (0x200000 sectors),
 * 64/32 otherwise.
 * NOTE(review): sector_div(capacity, n) divides 'capacity' in place and
 * returns the REMAINDER, so 'cylinders' here receives capacity % (h*s)
 * rather than the quotient — confirm against sector_div() semantics and
 * upstream history before relying on the reported cylinder count.
 */
static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
{
	int heads, sectors, cylinders;

	if (capacity >= 0x200000) {
		heads = 255;
		sectors = 63;
		cylinders = sector_div(capacity, heads * sectors);
	} else {
		heads = 64;
		sectors = 32;
		cylinders = sector_div(capacity, heads * sectors);
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	return 0;
}
1716
1717
/*
 * SCSI error-handler host reset callback, invoked by the midlayer when
 * a command times out.  Takes the ioctl mutex so a reset never races a
 * character-device ioctl, then performs a full controller reset.
 * Returns SUCCESS or FAILED per the eh_* contract.
 */
static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
{
	TW_Device_Extension *tw_dev = NULL;
	int retval = FAILED;

	tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;

	tw_dev->num_resets++;

	sdev_printk(KERN_WARNING, SCpnt->device,
		"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
		TW_DRIVER, 0x2c, SCpnt->cmnd[0]);

	/* Make sure we are not issuing an ioctl or resetting from ioctl */
	mutex_lock(&tw_dev->ioctl_lock);

	/* Now reset the card and some of the device extension data */
	if (twa_reset_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
		goto out;
	}

	retval = SUCCESS;
out:
	mutex_unlock(&tw_dev->ioctl_lock);
	return retval;
}
1745
1746
/*
 * queuecommand implementation (called via DEF_SCSI_QCMD with the host
 * lock held).  Rejects commands while a reset is in flight, fails
 * non-LUN-0 commands on firmware that predates LUN support, then grabs
 * a request id and hands the command to twa_scsiop_execute_scsi().
 * Returns 0, or SCSI_MLQUEUE_HOST_BUSY when the board cannot take more.
 */
static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
	int request_id, retval;
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;

	/* If we are resetting due to timed out ioctl, report as busy */
	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
		retval = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

	/* Check if this FW supports luns */
	if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
		SCpnt->result = (DID_BAD_TARGET << 16);
		done(SCpnt);
		retval = 0;
		goto out;
	}

	/* Save done function into scsi_cmnd struct */
	SCpnt->scsi_done = done;

	/* Get a free request id */
	twa_get_request_id(tw_dev, &request_id);

	/* Save the scsi command for use by the ISR */
	tw_dev->srb[request_id] = SCpnt;

	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
	switch (retval) {
	case SCSI_MLQUEUE_HOST_BUSY:
		/* Board busy: give the id back; midlayer will requeue */
		if (twa_command_mapped(SCpnt))
			scsi_dma_unmap(SCpnt);
		twa_free_request_id(tw_dev, request_id);
		break;
	case 1:
		/* Hard failure: complete the command with DID_ERROR */
		SCpnt->result = (DID_ERROR << 16);
		if (twa_command_mapped(SCpnt))
			scsi_dma_unmap(SCpnt);
		done(SCpnt);
		tw_dev->state[request_id] = TW_S_COMPLETED;
		twa_free_request_id(tw_dev, request_id);
		retval = 0;
	}
out:
	return retval;
}
1794
/* Generate twa_scsi_queue(): midlayer glue that takes the host lock
   and calls twa_scsi_queue_lck() */
static DEF_SCSI_QCMD(twa_scsi_queue)
1796
1797
/*
 * Build and post an EXECUTE_SCSI command packet for request_id.
 * For midlayer commands (cdb == NULL, sglistarg == NULL) the CDB and
 * scatter list come from tw_dev->srb[request_id]; small unmapped
 * requests are bounced through the request's generic buffer instead of
 * being DMA-mapped.  Internal callers supply an explicit cdb and a
 * prebuilt sglistarg of use_sg entries.
 * Returns 0 on success, 1 on failure, or SCSI_MLQUEUE_HOST_BUSY from
 * the post for midlayer commands.
 */
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
				   unsigned char *cdb, int use_sg,
				   TW_SG_Entry *sglistarg)
{
	TW_Command_Full *full_command_packet;
	TW_Command_Apache *command_packet;
	u32 num_sectors = 0x0;
	int i, sg_count;
	struct scsi_cmnd *srb = NULL;
	struct scatterlist *sg;
	int retval = 1;

	if (tw_dev->srb[request_id])
		srb = tw_dev->srb[request_id];

	/* Initialize command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	full_command_packet->header.header_desc.size_header = 128;
	full_command_packet->header.status_block.error = 0;
	full_command_packet->header.status_block.severity__reserved = 0;

	command_packet = &full_command_packet->command.newcommand;
	command_packet->status = 0;
	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);

	/* We forced 16 byte cdb use earlier, so copy here.
	   cdb == NULL implies a midlayer command, i.e. srb is non-NULL */
	if (!cdb)
		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
	else
		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);

	if (srb) {
		command_packet->unit = srb->device->id;
		command_packet->request_id__lunl =
			TW_REQ_LUN_IN(srb->device->lun, request_id);
	} else {
		command_packet->request_id__lunl =
			TW_REQ_LUN_IN(0, request_id);
		command_packet->unit = 0;
	}

	command_packet->sgl_offset = 16;

	if (!sglistarg) {
		/* Map sglist from scsi layer to cmd packet */
		if (scsi_sg_count(srb)) {
			if (!twa_command_mapped(srb)) {
				/* Bounce buffer path: copy outgoing data into
				   the generic buffer and point one sgl at it */
				if (srb->sc_data_direction == DMA_TO_DEVICE ||
				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
					scsi_sg_copy_to_buffer(srb,
							       tw_dev->generic_buffer_virt[request_id],
							       TW_SECTOR_SIZE);
				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
			} else {
				sg_count = scsi_dma_map(srb);
				if (sg_count < 0)
					goto out;

				scsi_for_each_sg(srb, sg, sg_count, i) {
					command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
					command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
					if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
						goto out;
					}
				}
			}
			command_packet->sgl_entries__lunh = TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id]));
		}
	} else {
		/* Internal cdb post: copy the caller's prebuilt sgl */
		for (i = 0; i < use_sg; i++) {
			command_packet->sg_list[i].address = sglistarg[i].address;
			command_packet->sg_list[i].length = sglistarg[i].length;
			if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
				goto out;
			}
		}
		command_packet->sgl_entries__lunh = TW_REQ_LUN_IN(0, use_sg);
	}

	if (srb) {
		if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
			num_sectors = (u32)srb->cmnd[4];

		if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
			num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
	}

	/* Update sector statistics */
	tw_dev->sector_count = num_sectors;
	if (tw_dev->sector_count > tw_dev->max_sector_count)
		tw_dev->max_sector_count = tw_dev->sector_count;

	/* Update SG statistics */
	if (srb) {
		tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
	}

	/* Now post the command to the board.
	   Midlayer commands may get HOST_BUSY back; internal posts are
	   parked on the pending queue, so they always "succeed" here */
	if (srb) {
		retval = twa_post_command_packet(tw_dev, request_id, 0);
	} else {
		twa_post_command_packet(tw_dev, request_id, 1);
		retval = 0;
	}
out:
	return retval;
}
1912
1913
1914static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1915{
1916 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1917
1918 if (!twa_command_mapped(cmd) &&
1919 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1920 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1921 if (scsi_sg_count(cmd) == 1) {
1922 void *buf = tw_dev->generic_buffer_virt[request_id];
1923
1924 scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1925 }
1926 }
1927}
1928
1929
/*
 * Common shutdown path shared by the PCI shutdown hook and twa_remove():
 * disable interrupts, release the irq, tell the firmware to close the
 * connection, and clear any interrupts left pending.
 */
static void __twa_shutdown(TW_Device_Extension *tw_dev)
{
	/* Disable interrupts */
	TW_DISABLE_INTERRUPTS(tw_dev);

	/* Free up the IRQ */
	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

	printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);

	/* Tell the card we are shutting down */
	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
	} else {
		printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
	}

	/* Clear all interrupts just before exit */
	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
}
1950
1951
1952static void twa_shutdown(struct pci_dev *pdev)
1953{
1954 struct Scsi_Host *host = pci_get_drvdata(pdev);
1955 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1956
1957 __twa_shutdown(tw_dev);
1958}
1959
1960
1961static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1962{
1963 int index;
1964
1965 for (index = 0; ((code != table[index].code) &&
1966 (table[index].text != (char *)0)); index++);
1967 return(table[index].text);
1968}
1969
1970
/*
 * Midlayer slave_configure hook: set a 60-second block layer request
 * timeout for every device on this host.  Always succeeds.
 */
static int twa_slave_configure(struct scsi_device *sdev)
{
	/* Force 60 second timeout */
	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);

	return 0;
}
1978
1979
/* SCSI host template describing the 3ware 9000 series to the midlayer */
static struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = "3ware 9000 Storage Controller",
	.queuecommand = twa_scsi_queue,
	.eh_host_reset_handler = twa_scsi_eh_reset,
	.bios_param = twa_scsi_biosparam,
	.change_queue_depth = scsi_change_queue_depth,
	/* Two of the TW_Q_LENGTH slots are held back from the midlayer
	   (presumably for internal/ioctl commands — see chrdev_request_id) */
	.can_queue = TW_Q_LENGTH-2,
	.slave_configure = twa_slave_configure,
	.this_id = -1,
	.sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
	.max_sectors = TW_MAX_SECTORS,
	.cmd_per_lun = TW_MAX_CMDS_PER_LUN,
	.shost_attrs = twa_host_attrs,
	.emulated = 1,
	.no_write_same = 1,
};
1997
1998
/*
 * PCI probe: enable and configure the device, allocate the Scsi_Host
 * and device extension, map registers, reset the controller, register
 * with the SCSI midlayer, hook up the (optionally MSI) interrupt and,
 * for the first controller found, register the "twa" character device.
 * Returns 0 on success or a negative errno, unwinding via the goto
 * chain on failure.
 */
static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
	struct Scsi_Host *host = NULL;
	TW_Device_Extension *tw_dev;
	unsigned long mem_addr, mem_len;
	int retval;

	retval = pci_enable_device(pdev);
	if (retval) {
		TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
		goto out_disable_device;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* Prefer 64-bit DMA, fall back to 32-bit */
	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (retval)
		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (retval) {
		TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
		retval = -ENODEV;
		goto out_disable_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
	if (!host) {
		TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
		retval = -ENOMEM;
		goto out_disable_device;
	}
	tw_dev = (TW_Device_Extension *)host->hostdata;

	/* Save values to device extension */
	tw_dev->host = host;
	tw_dev->tw_pci_dev = pdev;

	if (twa_initialize_device_extension(tw_dev)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
		retval = -ENOMEM;
		goto out_free_device_extension;
	}

	/* Request IO regions */
	retval = pci_request_regions(pdev, "3w-9xxx");
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
		goto out_free_device_extension;
	}

	/* The register BAR differs between the original 9000 and later parts */
	if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
		mem_addr = pci_resource_start(pdev, 1);
		mem_len = pci_resource_len(pdev, 1);
	} else {
		mem_addr = pci_resource_start(pdev, 2);
		mem_len = pci_resource_len(pdev, 2);
	}

	/* Save base address */
	tw_dev->base_addr = ioremap(mem_addr, mem_len);
	if (!tw_dev->base_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
		retval = -ENOMEM;
		goto out_release_mem_region;
	}

	/* Disable interrupts on the card */
	TW_DISABLE_INTERRUPTS(tw_dev);

	/* Initialize the card */
	if (twa_reset_sequence(tw_dev, 0)) {
		retval = -ENOMEM;
		goto out_iounmap;
	}

	/* Set host specific parameters */
	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
	    (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
		host->max_id = TW_MAX_UNITS_9650SE;
	else
		host->max_id = TW_MAX_UNITS;

	host->max_cmd_len = TW_MAX_CDB_LEN;

	/* Luns depend on the firmware SRL negotiated during reset */
	host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
	host->max_channel = 0;

	/* Register the card with the kernel SCSI layer */
	retval = scsi_add_host(host, &pdev->dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
		goto out_iounmap;
	}

	pci_set_drvdata(pdev, host);

	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
	       host->host_no, mem_addr, pdev->irq);
	printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
	       host->host_no,
	       (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
	       (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
				     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
	       le32_to_cpu(*(__le32 *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
				     TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));

	/* Try to enable MSI (module option; not on the original 9000) */
	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
	    !pci_enable_msi(pdev))
		set_bit(TW_USING_MSI, &tw_dev->flags);

	/* Now setup the interrupt handler */
	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
		goto out_remove_host;
	}

	twa_device_extension_list[twa_device_extension_count] = tw_dev;
	twa_device_extension_count++;

	/* Re-enable interrupts on the card */
	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);

	/* Finally, scan the host */
	scsi_scan_host(host);

	/* Register the shared "twa" character device on first probe */
	if (twa_major == -1) {
		if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
			TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
	}
	return 0;

out_remove_host:
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);
	scsi_remove_host(host);
out_iounmap:
	iounmap(tw_dev->base_addr);
out_release_mem_region:
	pci_release_regions(pdev);
out_free_device_extension:
	twa_free_device_extension(tw_dev);
	scsi_host_put(host);
out_disable_device:
	pci_disable_device(pdev);

	return retval;
}
2150
2151
/* This function is called by the PCI core when the device is removed
   (or the driver unloaded).  It tears down everything twa_probe() set
   up, in reverse order of acquisition. */
static void twa_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	scsi_remove_host(tw_dev->host);

	/* Unregister character device.  twa_major is shared by all
	   controllers; resetting it to -1 lets a later probe register
	   the chrdev again. */
	if (twa_major >= 0) {
		unregister_chrdev(twa_major, "twa");
		twa_major = -1;
	}

	/* Shutdown the card */
	__twa_shutdown(tw_dev);

	/* Disable MSI if it was enabled during probe */
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);

	/* Free IO remapping */
	iounmap(tw_dev->base_addr);

	/* Free up the mem region */
	pci_release_regions(pdev);

	/* Free up device extension resources */
	twa_free_device_extension(tw_dev);

	scsi_host_put(tw_dev->host);
	pci_disable_device(pdev);
	/* NOTE(review): the twa_device_extension_list[] slot for this
	   controller is not cleared here, only the count is dropped --
	   presumably safe because slots are reused in probe order; confirm. */
	twa_device_extension_count--;
}
2185
2186
/* This function is called on system suspend.  It quiesces the
   controller: masks interrupts, releases the IRQ line, disables MSI,
   and tells the firmware to close its connection.  Always returns 0. */
static int __maybe_unused twa_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);

	/* Mask interrupts before giving the IRQ back */
	TW_DISABLE_INTERRUPTS(tw_dev);
	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);

	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_disable_msi(pdev);

	/* Tell the card we are shutting down; a failure here is only
	   reported, not propagated -- suspend proceeds regardless. */
	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
	} else {
		printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
	}
	TW_CLEAR_ALL_INTERRUPTS(tw_dev);

	return 0;
}
2211
2212
/* This function is called on system resume.  It re-establishes the DMA
   mask, resets the controller, reacquires the IRQ and re-enables MSI,
   then unmasks interrupts.  On failure the host is removed and a
   negative errno is returned. */
static int __maybe_unused twa_resume(struct device *dev)
{
	int retval = 0;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;

	printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);

	/* Memory-write-invalidate is a best-effort optimization; ignore failure */
	pci_try_set_mwi(pdev);

	/* Prefer a 64-bit DMA mask, fall back to 32-bit */
	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (retval)
		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (retval) {
		TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
		retval = -ENODEV;
		goto out_disable_device;
	}

	/* Initialize the card */
	if (twa_reset_sequence(tw_dev, 0)) {
		retval = -ENODEV;
		goto out_disable_device;
	}

	/* Now setup the interrupt handler */
	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
	if (retval) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
		retval = -ENODEV;
		goto out_disable_device;
	}

	/* Re-enable MSI if it was in use before suspend.
	   NOTE(review): pci_enable_msi()'s return value is ignored here;
	   presumably a fallback to legacy INTx is acceptable -- confirm. */
	if (test_bit(TW_USING_MSI, &tw_dev->flags))
		pci_enable_msi(pdev);

	/* Re-enable interrupts on the card */
	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);

	printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
	return 0;

out_disable_device:
	/* Label name is historical; only the SCSI host is removed here */
	scsi_remove_host(host);

	return retval;
}
2262
2263
2264static struct pci_device_id twa_pci_tbl[] = {
2265 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2266 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2267 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2268 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2269 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2270 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2271 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2272 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2273 { }
2274};
2275MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2276
/* Power-management operations: wires twa_suspend/twa_resume into a
   dev_pm_ops table for system-wide suspend/resume */
static SIMPLE_DEV_PM_OPS(twa_pm_ops, twa_suspend, twa_resume);
2278
2279
/* pci_driver initializer: binds the supported-device table and the
   probe/remove/PM/shutdown callbacks for registration with the PCI core */
static struct pci_driver twa_driver = {
	.name		= "3w-9xxx",
	.id_table	= twa_pci_tbl,
	.probe		= twa_probe,
	.remove		= twa_remove,
	.driver.pm	= &twa_pm_ops,
	.shutdown	= twa_shutdown
};
2288
2289
2290static int __init twa_init(void)
2291{
2292 printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2293
2294 return pci_register_driver(&twa_driver);
2295}
2296
2297
/* Module exit point: unregister from the PCI core, which triggers
   twa_remove() for every bound controller */
static void __exit twa_exit(void)
{
	pci_unregister_driver(&twa_driver);
}
2302
/* Register module entry/exit points */
module_init(twa_init);
module_exit(twa_exit);
2305
2306