1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49#include <linux/module.h>
50#include <linux/moduleparam.h>
51#include <linux/delay.h>
52#include <linux/ctype.h>
53#include <linux/blkdev.h>
54#include <linux/interrupt.h>
55#include <linux/init.h>
56#include <linux/spinlock.h>
57#include <linux/pci.h>
58#include <linux/list.h>
59#include <linux/vmalloc.h>
60#include <linux/slab.h>
61#include <asm/io.h>
62
63#include <scsi/scsi.h>
64#include <scsi/scsicam.h>
65#include <scsi/scsi_cmnd.h>
66#include <scsi/scsi_device.h>
67#include <scsi/scsi_host.h>
68
69#include "dc395x.h"
70
71#define DC395X_NAME "dc395x"
72#define DC395X_BANNER "Tekram DC395(U/UW/F), DC315(U) - ASIC TRM-S1040"
73#define DC395X_VERSION "v2.05, 2004/03/08"
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92#define DBG_KG 0x0001
93#define DBG_0 0x0002
94#define DBG_1 0x0004
95#define DBG_SG 0x0020
96#define DBG_FIFO 0x0040
97#define DBG_PIO 0x0080
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112#define dprintkl(level, format, arg...) \
113 printk(level DC395X_NAME ": " format , ## arg)
114
115
116#ifdef DEBUG_MASK
117
118
119
120
121
122
123#define dprintkdbg(type, format, arg...) \
124 do { \
125 if ((type) & (DEBUG_MASK)) \
126 dprintkl(KERN_DEBUG , format , ## arg); \
127 } while (0)
128
129
130
131
132#define debug_enabled(type) ((DEBUG_MASK) & (type))
133
134#else
135
136
137
138#define dprintkdbg(type, format, arg...) \
139 do {} while (0)
140#define debug_enabled(type) (0)
141
142#endif
143
144
145#ifndef PCI_VENDOR_ID_TEKRAM
146#define PCI_VENDOR_ID_TEKRAM 0x1DE1
147#endif
148#ifndef PCI_DEVICE_ID_TEKRAM_TRMS1040
149#define PCI_DEVICE_ID_TEKRAM_TRMS1040 0x0391
150#endif
151
152
153#define DC395x_LOCK_IO(dev,flags) spin_lock_irqsave(((struct Scsi_Host *)dev)->host_lock, flags)
154#define DC395x_UNLOCK_IO(dev,flags) spin_unlock_irqrestore(((struct Scsi_Host *)dev)->host_lock, flags)
155
156#define DC395x_read8(acb,address) (u8)(inb(acb->io_port_base + (address)))
157#define DC395x_read16(acb,address) (u16)(inw(acb->io_port_base + (address)))
158#define DC395x_read32(acb,address) (u32)(inl(acb->io_port_base + (address)))
159#define DC395x_write8(acb,address,value) outb((value), acb->io_port_base + (address))
160#define DC395x_write16(acb,address,value) outw((value), acb->io_port_base + (address))
161#define DC395x_write32(acb,address,value) outl((value), acb->io_port_base + (address))
162
163
164#define RES_TARGET 0x000000FF
165#define RES_TARGET_LNX STATUS_MASK
166#define RES_ENDMSG 0x0000FF00
167#define RES_DID 0x00FF0000
168#define RES_DRV 0xFF000000
169
170#define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
171#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)
172
173#define SET_RES_TARGET(who,tgt) { who &= ~RES_TARGET; who |= (int)(tgt); }
174#define SET_RES_TARGET_LNX(who,tgt) { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; }
175#define SET_RES_MSG(who,msg) { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; }
176#define SET_RES_DID(who,did) { who &= ~RES_DID; who |= (int)(did) << 16; }
177#define SET_RES_DRV(who,drv) { who &= ~RES_DRV; who |= (int)(drv) << 24; }
178
179#define TAG_NONE 255
180
181
182
183
184
185
186#define SEGMENTX_LEN (sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
187
188
189struct SGentry {
190 u32 address;
191 u32 length;
192};
193
194
195struct NVRamTarget {
196 u8 cfg0;
197 u8 period;
198 u8 cfg2;
199 u8 cfg3;
200};
201
202struct NvRamType {
203 u8 sub_vendor_id[2];
204 u8 sub_sys_id[2];
205 u8 sub_class;
206 u8 vendor_id[2];
207 u8 device_id[2];
208 u8 reserved;
209 struct NVRamTarget target[DC395x_MAX_SCSI_ID];
210
211
212
213
214
215
216 u8 scsi_id;
217 u8 channel_cfg;
218 u8 delay_time;
219 u8 max_tag;
220 u8 reserved0;
221 u8 boot_target;
222 u8 boot_lun;
223 u8 reserved1;
224 u16 reserved2[22];
225 u16 cksum;
226};
227
228struct ScsiReqBlk {
229 struct list_head list;
230 struct DeviceCtlBlk *dcb;
231 struct scsi_cmnd *cmd;
232
233 struct SGentry *segment_x;
234 dma_addr_t sg_bus_addr;
235
236 u8 sg_count;
237 u8 sg_index;
238 size_t total_xfer_length;
239 size_t request_length;
240
241
242
243
244
245
246
247
248
249 size_t xferred;
250
251 u16 state;
252
253 u8 msgin_buf[6];
254 u8 msgout_buf[6];
255
256 u8 adapter_status;
257 u8 target_status;
258 u8 msg_count;
259 u8 end_message;
260
261 u8 tag_number;
262 u8 status;
263 u8 retry_count;
264 u8 flag;
265
266 u8 scsi_phase;
267};
268
269struct DeviceCtlBlk {
270 struct list_head list;
271 struct AdapterCtlBlk *acb;
272 struct list_head srb_going_list;
273 struct list_head srb_waiting_list;
274
275 struct ScsiReqBlk *active_srb;
276 u32 tag_mask;
277
278 u16 max_command;
279
280 u8 target_id;
281 u8 target_lun;
282 u8 identify_msg;
283 u8 dev_mode;
284
285 u8 inquiry7;
286 u8 sync_mode;
287 u8 min_nego_period;
288 u8 sync_period;
289
290 u8 sync_offset;
291 u8 flag;
292 u8 dev_type;
293 u8 init_tcq_flag;
294};
295
296struct AdapterCtlBlk {
297 struct Scsi_Host *scsi_host;
298
299 unsigned long io_port_base;
300 unsigned long io_port_len;
301
302 struct list_head dcb_list;
303 struct DeviceCtlBlk *dcb_run_robin;
304 struct DeviceCtlBlk *active_dcb;
305
306 struct list_head srb_free_list;
307 struct ScsiReqBlk *tmp_srb;
308 struct timer_list waiting_timer;
309 struct timer_list selto_timer;
310
311 unsigned long last_reset;
312
313 u16 srb_count;
314
315 u8 sel_timeout;
316
317 unsigned int irq_level;
318 u8 tag_max_num;
319 u8 acb_flag;
320 u8 gmode2;
321
322 u8 config;
323 u8 lun_chk;
324 u8 scan_devices;
325 u8 hostid_bit;
326
327 u8 dcb_map[DC395x_MAX_SCSI_ID];
328 struct DeviceCtlBlk *children[DC395x_MAX_SCSI_ID][32];
329
330 struct pci_dev *dev;
331
332 u8 msg_len;
333
334 struct ScsiReqBlk srb_array[DC395x_MAX_SRB_CNT];
335 struct ScsiReqBlk srb;
336
337 struct NvRamType eeprom;
338};
339
340
341
342
343
344static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
345 u16 *pscsi_status);
346static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
347 u16 *pscsi_status);
348static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
349 u16 *pscsi_status);
350static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
351 u16 *pscsi_status);
352static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
353 u16 *pscsi_status);
354static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
355 u16 *pscsi_status);
356static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
357 u16 *pscsi_status);
358static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
359 u16 *pscsi_status);
360static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
361 u16 *pscsi_status);
362static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
363 u16 *pscsi_status);
364static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
365 u16 *pscsi_status);
366static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
367 u16 *pscsi_status);
368static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
369 u16 *pscsi_status);
370static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
371 u16 *pscsi_status);
372static void set_basic_config(struct AdapterCtlBlk *acb);
373static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
374 struct ScsiReqBlk *srb);
375static void reset_scsi_bus(struct AdapterCtlBlk *acb);
376static void data_io_transfer(struct AdapterCtlBlk *acb,
377 struct ScsiReqBlk *srb, u16 io_dir);
378static void disconnect(struct AdapterCtlBlk *acb);
379static void reselect(struct AdapterCtlBlk *acb);
380static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
381 struct ScsiReqBlk *srb);
382static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
383 struct ScsiReqBlk *srb);
384static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
385 struct ScsiReqBlk *srb);
386static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_code,
387 struct scsi_cmnd *cmd, u8 force);
388static void scsi_reset_detect(struct AdapterCtlBlk *acb);
389static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb);
390static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
391 struct ScsiReqBlk *srb);
392static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
393 struct ScsiReqBlk *srb);
394static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
395 struct ScsiReqBlk *srb);
396static void set_xfer_rate(struct AdapterCtlBlk *acb,
397 struct DeviceCtlBlk *dcb);
398static void waiting_timeout(unsigned long ptr);
399
400
401
402
403
404static u16 current_sync_offset = 0;
405
406static void *dc395x_scsi_phase0[] = {
407 data_out_phase0,
408 data_in_phase0,
409 command_phase0,
410 status_phase0,
411 nop0,
412 nop0,
413 msgout_phase0,
414 msgin_phase0,
415};
416
417static void *dc395x_scsi_phase1[] = {
418 data_out_phase1,
419 data_in_phase1,
420 command_phase1,
421 status_phase1,
422 nop1,
423 nop1,
424 msgout_phase1,
425 msgin_phase1,
426};
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 };
451static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 };
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467#define CFG_ADAPTER_ID 0
468#define CFG_MAX_SPEED 1
469#define CFG_DEV_MODE 2
470#define CFG_ADAPTER_MODE 3
471#define CFG_TAGS 4
472#define CFG_RESET_DELAY 5
473
474#define CFG_NUM 6
475
476
477
478
479
480
481#define CFG_PARAM_UNSET -1
482
483
484
485
486
487struct ParameterData {
488 int value;
489 int min;
490 int max;
491 int def;
492 int safe;
493};
494static struct ParameterData cfg_data[] = {
495 {
496 CFG_PARAM_UNSET,
497 0,
498 15,
499 7,
500 7
501 },
502 {
503 CFG_PARAM_UNSET,
504 0,
505 7,
506 1,
507 4,
508 },
509 {
510 CFG_PARAM_UNSET,
511 0,
512 0x3f,
513 NTC_DO_PARITY_CHK | NTC_DO_DISCONNECT | NTC_DO_SYNC_NEGO |
514 NTC_DO_WIDE_NEGO | NTC_DO_TAG_QUEUEING |
515 NTC_DO_SEND_START,
516 NTC_DO_PARITY_CHK | NTC_DO_SEND_START
517 },
518 {
519 CFG_PARAM_UNSET,
520 0,
521 0x2f,
522 NAC_SCANLUN |
523 NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET
524 ,
525 NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08
526 },
527 {
528 CFG_PARAM_UNSET,
529 0,
530 5,
531 3,
532 2,
533 },
534 {
535 CFG_PARAM_UNSET,
536 0,
537 180,
538 1,
539 10,
540 }
541};
542
543
544
545
546
547
548
549static bool use_safe_settings = 0;
550module_param_named(safe, use_safe_settings, bool, 0);
551MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false");
552
553
554module_param_named(adapter_id, cfg_data[CFG_ADAPTER_ID].value, int, 0);
555MODULE_PARM_DESC(adapter_id, "Adapter SCSI ID. Default 7 (0-15)");
556
557module_param_named(max_speed, cfg_data[CFG_MAX_SPEED].value, int, 0);
558MODULE_PARM_DESC(max_speed, "Maximum bus speed. Default 1 (0-7) Speeds: 0=20, 1=13.3, 2=10, 3=8, 4=6.7, 5=5.8, 6=5, 7=4 Mhz");
559
560module_param_named(dev_mode, cfg_data[CFG_DEV_MODE].value, int, 0);
561MODULE_PARM_DESC(dev_mode, "Device mode.");
562
563module_param_named(adapter_mode, cfg_data[CFG_ADAPTER_MODE].value, int, 0);
564MODULE_PARM_DESC(adapter_mode, "Adapter mode.");
565
566module_param_named(tags, cfg_data[CFG_TAGS].value, int, 0);
567MODULE_PARM_DESC(tags, "Number of tags (1<<x). Default 3 (0-5)");
568
569module_param_named(reset_delay, cfg_data[CFG_RESET_DELAY].value, int, 0);
570MODULE_PARM_DESC(reset_delay, "Reset delay in seconds. Default 1 (0-180)");
571
572
573
574
575
576
577static void set_safe_settings(void)
578{
579 if (use_safe_settings)
580 {
581 int i;
582
583 dprintkl(KERN_INFO, "Using safe settings.\n");
584 for (i = 0; i < CFG_NUM; i++)
585 {
586 cfg_data[i].value = cfg_data[i].safe;
587 }
588 }
589}
590
591
592
593
594
595
596static void fix_settings(void)
597{
598 int i;
599
600 dprintkdbg(DBG_1,
601 "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x "
602 "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n",
603 cfg_data[CFG_ADAPTER_ID].value,
604 cfg_data[CFG_MAX_SPEED].value,
605 cfg_data[CFG_DEV_MODE].value,
606 cfg_data[CFG_ADAPTER_MODE].value,
607 cfg_data[CFG_TAGS].value,
608 cfg_data[CFG_RESET_DELAY].value);
609 for (i = 0; i < CFG_NUM; i++)
610 {
611 if (cfg_data[i].value < cfg_data[i].min
612 || cfg_data[i].value > cfg_data[i].max)
613 cfg_data[i].value = cfg_data[i].def;
614 }
615}
616
617
618
619
620
621
622
623static char eeprom_index_to_delay_map[] =
624 { 1, 3, 5, 10, 16, 30, 60, 120 };
625
626
627
628
629
630
631
632
633static void eeprom_index_to_delay(struct NvRamType *eeprom)
634{
635 eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time];
636}
637
638
639
640
641
642
643
644
645
646static int delay_to_eeprom_index(int delay)
647{
648 u8 idx = 0;
649 while (idx < 7 && eeprom_index_to_delay_map[idx] < delay)
650 idx++;
651 return idx;
652}
653
654
655
656
657
658
659
660
661
662static void eeprom_override(struct NvRamType *eeprom)
663{
664 u8 id;
665
666
667 if (cfg_data[CFG_ADAPTER_ID].value != CFG_PARAM_UNSET)
668 eeprom->scsi_id = (u8)cfg_data[CFG_ADAPTER_ID].value;
669
670 if (cfg_data[CFG_ADAPTER_MODE].value != CFG_PARAM_UNSET)
671 eeprom->channel_cfg = (u8)cfg_data[CFG_ADAPTER_MODE].value;
672
673 if (cfg_data[CFG_RESET_DELAY].value != CFG_PARAM_UNSET)
674 eeprom->delay_time = delay_to_eeprom_index(
675 cfg_data[CFG_RESET_DELAY].value);
676
677 if (cfg_data[CFG_TAGS].value != CFG_PARAM_UNSET)
678 eeprom->max_tag = (u8)cfg_data[CFG_TAGS].value;
679
680
681 for (id = 0; id < DC395x_MAX_SCSI_ID; id++) {
682 if (cfg_data[CFG_DEV_MODE].value != CFG_PARAM_UNSET)
683 eeprom->target[id].cfg0 =
684 (u8)cfg_data[CFG_DEV_MODE].value;
685
686 if (cfg_data[CFG_MAX_SPEED].value != CFG_PARAM_UNSET)
687 eeprom->target[id].period =
688 (u8)cfg_data[CFG_MAX_SPEED].value;
689
690 }
691}
692
693
694
695
696
697static unsigned int list_size(struct list_head *head)
698{
699 unsigned int count = 0;
700 struct list_head *pos;
701 list_for_each(pos, head)
702 count++;
703 return count;
704}
705
706
707static struct DeviceCtlBlk *dcb_get_next(struct list_head *head,
708 struct DeviceCtlBlk *pos)
709{
710 int use_next = 0;
711 struct DeviceCtlBlk* next = NULL;
712 struct DeviceCtlBlk* i;
713
714 if (list_empty(head))
715 return NULL;
716
717
718 list_for_each_entry(i, head, list)
719 if (use_next) {
720 next = i;
721 break;
722 } else if (i == pos) {
723 use_next = 1;
724 }
725
726 if (!next)
727 list_for_each_entry(i, head, list) {
728 next = i;
729 break;
730 }
731
732 return next;
733}
734
735
736static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
737{
738 if (srb->tag_number < 255) {
739 dcb->tag_mask &= ~(1 << srb->tag_number);
740 srb->tag_number = 255;
741 }
742}
743
744
745
746static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
747 struct list_head *head)
748{
749 struct ScsiReqBlk *i;
750 list_for_each_entry(i, head, list)
751 if (i->cmd == cmd)
752 return i;
753 return NULL;
754}
755
756
757static struct ScsiReqBlk *srb_get_free(struct AdapterCtlBlk *acb)
758{
759 struct list_head *head = &acb->srb_free_list;
760 struct ScsiReqBlk *srb = NULL;
761
762 if (!list_empty(head)) {
763 srb = list_entry(head->next, struct ScsiReqBlk, list);
764 list_del(head->next);
765 dprintkdbg(DBG_0, "srb_get_free: srb=%p\n", srb);
766 }
767 return srb;
768}
769
770
771static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
772{
773 dprintkdbg(DBG_0, "srb_free_insert: srb=%p\n", srb);
774 list_add_tail(&srb->list, &acb->srb_free_list);
775}
776
777
778static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
779 struct ScsiReqBlk *srb)
780{
781 dprintkdbg(DBG_0, "srb_waiting_insert: (0x%p) <%02i-%i> srb=%p\n",
782 srb->cmd, dcb->target_id, dcb->target_lun, srb);
783 list_add(&srb->list, &dcb->srb_waiting_list);
784}
785
786
787static void srb_waiting_append(struct DeviceCtlBlk *dcb,
788 struct ScsiReqBlk *srb)
789{
790 dprintkdbg(DBG_0, "srb_waiting_append: (0x%p) <%02i-%i> srb=%p\n",
791 srb->cmd, dcb->target_id, dcb->target_lun, srb);
792 list_add_tail(&srb->list, &dcb->srb_waiting_list);
793}
794
795
796static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
797{
798 dprintkdbg(DBG_0, "srb_going_append: (0x%p) <%02i-%i> srb=%p\n",
799 srb->cmd, dcb->target_id, dcb->target_lun, srb);
800 list_add_tail(&srb->list, &dcb->srb_going_list);
801}
802
803
804static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
805{
806 struct ScsiReqBlk *i;
807 struct ScsiReqBlk *tmp;
808 dprintkdbg(DBG_0, "srb_going_remove: (0x%p) <%02i-%i> srb=%p\n",
809 srb->cmd, dcb->target_id, dcb->target_lun, srb);
810
811 list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
812 if (i == srb) {
813 list_del(&srb->list);
814 break;
815 }
816}
817
818
819static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
820 struct ScsiReqBlk *srb)
821{
822 struct ScsiReqBlk *i;
823 struct ScsiReqBlk *tmp;
824 dprintkdbg(DBG_0, "srb_waiting_remove: (0x%p) <%02i-%i> srb=%p\n",
825 srb->cmd, dcb->target_id, dcb->target_lun, srb);
826
827 list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
828 if (i == srb) {
829 list_del(&srb->list);
830 break;
831 }
832}
833
834
835static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
836 struct ScsiReqBlk *srb)
837{
838 dprintkdbg(DBG_0,
839 "srb_going_to_waiting_move: (0x%p) <%02i-%i> srb=%p\n",
840 srb->cmd, dcb->target_id, dcb->target_lun, srb);
841 list_move(&srb->list, &dcb->srb_waiting_list);
842}
843
844
845static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
846 struct ScsiReqBlk *srb)
847{
848 dprintkdbg(DBG_0,
849 "srb_waiting_to_going_move: (0x%p) <%02i-%i> srb=%p\n",
850 srb->cmd, dcb->target_id, dcb->target_lun, srb);
851 list_move(&srb->list, &dcb->srb_going_list);
852}
853
854
855
856static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
857{
858 if (timer_pending(&acb->waiting_timer))
859 return;
860 init_timer(&acb->waiting_timer);
861 acb->waiting_timer.function = waiting_timeout;
862 acb->waiting_timer.data = (unsigned long) acb;
863 if (time_before(jiffies + to, acb->last_reset - HZ / 2))
864 acb->waiting_timer.expires =
865 acb->last_reset - HZ / 2 + 1;
866 else
867 acb->waiting_timer.expires = jiffies + to + 1;
868 add_timer(&acb->waiting_timer);
869}
870
871
872
873static void waiting_process_next(struct AdapterCtlBlk *acb)
874{
875 struct DeviceCtlBlk *start = NULL;
876 struct DeviceCtlBlk *pos;
877 struct DeviceCtlBlk *dcb;
878 struct ScsiReqBlk *srb;
879 struct list_head *dcb_list_head = &acb->dcb_list;
880
881 if (acb->active_dcb
882 || (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV)))
883 return;
884
885 if (timer_pending(&acb->waiting_timer))
886 del_timer(&acb->waiting_timer);
887
888 if (list_empty(dcb_list_head))
889 return;
890
891
892
893
894
895 list_for_each_entry(dcb, dcb_list_head, list)
896 if (dcb == acb->dcb_run_robin) {
897 start = dcb;
898 break;
899 }
900 if (!start) {
901
902 start = list_entry(dcb_list_head->next, typeof(*start), list);
903 acb->dcb_run_robin = start;
904 }
905
906
907
908
909
910
911 pos = start;
912 do {
913 struct list_head *waiting_list_head = &pos->srb_waiting_list;
914
915
916 acb->dcb_run_robin = dcb_get_next(dcb_list_head,
917 acb->dcb_run_robin);
918
919 if (list_empty(waiting_list_head) ||
920 pos->max_command <= list_size(&pos->srb_going_list)) {
921
922 pos = dcb_get_next(dcb_list_head, pos);
923 } else {
924 srb = list_entry(waiting_list_head->next,
925 struct ScsiReqBlk, list);
926
927
928 if (!start_scsi(acb, pos, srb))
929 srb_waiting_to_going_move(pos, srb);
930 else
931 waiting_set_timer(acb, HZ/50);
932 break;
933 }
934 } while (pos != start);
935}
936
937
938
939static void waiting_timeout(unsigned long ptr)
940{
941 unsigned long flags;
942 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
943 dprintkdbg(DBG_1,
944 "waiting_timeout: Queue woken up by timer. acb=%p\n", acb);
945 DC395x_LOCK_IO(acb->scsi_host, flags);
946 waiting_process_next(acb);
947 DC395x_UNLOCK_IO(acb->scsi_host, flags);
948}
949
950
951
952static struct DeviceCtlBlk *find_dcb(struct AdapterCtlBlk *acb, u8 id, u8 lun)
953{
954 return acb->children[id][lun];
955}
956
957
958
959static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
960{
961 struct DeviceCtlBlk *dcb = srb->dcb;
962
963 if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
964 acb->active_dcb ||
965 (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
966 srb_waiting_append(dcb, srb);
967 waiting_process_next(acb);
968 return;
969 }
970
971 if (!start_scsi(acb, dcb, srb))
972 srb_going_append(dcb, srb);
973 else {
974 srb_waiting_insert(dcb, srb);
975 waiting_set_timer(acb, HZ / 50);
976 }
977}
978
979
980static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
981 struct ScsiReqBlk *srb)
982{
983 int nseg;
984 enum dma_data_direction dir = cmd->sc_data_direction;
985 dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n",
986 cmd, dcb->target_id, dcb->target_lun);
987
988 srb->dcb = dcb;
989 srb->cmd = cmd;
990 srb->sg_count = 0;
991 srb->total_xfer_length = 0;
992 srb->sg_bus_addr = 0;
993 srb->sg_index = 0;
994 srb->adapter_status = 0;
995 srb->target_status = 0;
996 srb->msg_count = 0;
997 srb->status = 0;
998 srb->flag = 0;
999 srb->state = 0;
1000 srb->retry_count = 0;
1001 srb->tag_number = TAG_NONE;
1002 srb->scsi_phase = PH_BUS_FREE;
1003 srb->end_message = 0;
1004
1005 nseg = scsi_dma_map(cmd);
1006 BUG_ON(nseg < 0);
1007
1008 if (dir == PCI_DMA_NONE || !nseg) {
1009 dprintkdbg(DBG_0,
1010 "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
1011 cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd),
1012 srb->segment_x[0].address);
1013 } else {
1014 int i;
1015 u32 reqlen = scsi_bufflen(cmd);
1016 struct scatterlist *sg;
1017 struct SGentry *sgp = srb->segment_x;
1018
1019 srb->sg_count = nseg;
1020
1021 dprintkdbg(DBG_0,
1022 "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
1023 reqlen, scsi_sglist(cmd), scsi_sg_count(cmd),
1024 srb->sg_count);
1025
1026 scsi_for_each_sg(cmd, sg, srb->sg_count, i) {
1027 u32 busaddr = (u32)sg_dma_address(sg);
1028 u32 seglen = (u32)sg->length;
1029 sgp[i].address = busaddr;
1030 sgp[i].length = seglen;
1031 srb->total_xfer_length += seglen;
1032 }
1033 sgp += srb->sg_count - 1;
1034
1035
1036
1037
1038
1039 if (srb->total_xfer_length > reqlen) {
1040 sgp->length -= (srb->total_xfer_length - reqlen);
1041 srb->total_xfer_length = reqlen;
1042 }
1043
1044
1045 if (dcb->sync_period & WIDE_SYNC &&
1046 srb->total_xfer_length % 2) {
1047 srb->total_xfer_length++;
1048 sgp->length++;
1049 }
1050
1051 srb->sg_bus_addr = pci_map_single(dcb->acb->dev,
1052 srb->segment_x,
1053 SEGMENTX_LEN,
1054 PCI_DMA_TODEVICE);
1055
1056 dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
1057 srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
1058 }
1059
1060 srb->request_length = srb->total_xfer_length;
1061}
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
1084{
1085 struct DeviceCtlBlk *dcb;
1086 struct ScsiReqBlk *srb;
1087 struct AdapterCtlBlk *acb =
1088 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
1089 dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n",
1090 cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]);
1091
1092
1093 cmd->result = DID_BAD_TARGET << 16;
1094
1095
1096 if (cmd->device->id >= acb->scsi_host->max_id ||
1097 cmd->device->lun >= acb->scsi_host->max_lun ||
1098 cmd->device->lun >31) {
1099 goto complete;
1100 }
1101
1102
1103 if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) {
1104 dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n",
1105 cmd->device->id, (u8)cmd->device->lun);
1106 goto complete;
1107 }
1108
1109
1110 dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
1111 if (!dcb) {
1112
1113 dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>",
1114 cmd->device->id, (u8)cmd->device->lun);
1115 goto complete;
1116 }
1117
1118
1119 cmd->scsi_done = done;
1120 cmd->result = 0;
1121
1122 srb = srb_get_free(acb);
1123 if (!srb)
1124 {
1125
1126
1127
1128
1129 dprintkdbg(DBG_0, "queue_command: No free srb's\n");
1130 return 1;
1131 }
1132
1133 build_srb(cmd, dcb, srb);
1134
1135 if (!list_empty(&dcb->srb_waiting_list)) {
1136
1137 srb_waiting_append(dcb, srb);
1138 waiting_process_next(acb);
1139 } else {
1140
1141 send_srb(acb, srb);
1142 }
1143 dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd);
1144 return 0;
1145
1146complete:
1147
1148
1149
1150
1151
1152
1153 done(cmd);
1154 return 0;
1155}
1156
1157static DEF_SCSI_QCMD(dc395x_queue_command)
1158
1159
1160
1161
1162static int dc395x_bios_param(struct scsi_device *sdev,
1163 struct block_device *bdev, sector_t capacity, int *info)
1164{
1165#ifdef CONFIG_SCSI_DC395x_TRMS1040_TRADMAP
1166 int heads, sectors, cylinders;
1167 struct AdapterCtlBlk *acb;
1168 int size = capacity;
1169
1170 dprintkdbg(DBG_0, "dc395x_bios_param..............\n");
1171 acb = (struct AdapterCtlBlk *)sdev->host->hostdata;
1172 heads = 64;
1173 sectors = 32;
1174 cylinders = size / (heads * sectors);
1175
1176 if ((acb->gmode2 & NAC_GREATER_1G) && (cylinders > 1024)) {
1177 heads = 255;
1178 sectors = 63;
1179 cylinders = size / (heads * sectors);
1180 }
1181 geom[0] = heads;
1182 geom[1] = sectors;
1183 geom[2] = cylinders;
1184 return 0;
1185#else
1186 return scsicam_bios_param(bdev, capacity, info);
1187#endif
1188}
1189
1190
1191static void dump_register_info(struct AdapterCtlBlk *acb,
1192 struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
1193{
1194 u16 pstat;
1195 struct pci_dev *dev = acb->dev;
1196 pci_read_config_word(dev, PCI_STATUS, &pstat);
1197 if (!dcb)
1198 dcb = acb->active_dcb;
1199 if (!srb && dcb)
1200 srb = dcb->active_srb;
1201 if (srb) {
1202 if (!srb->cmd)
1203 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
1204 srb, srb->cmd);
1205 else
1206 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p "
1207 "cmnd=0x%02x <%02i-%i>\n",
1208 srb, srb->cmd,
1209 srb->cmd->cmnd[0], srb->cmd->device->id,
1210 (u8)srb->cmd->device->lun);
1211 printk(" sglist=%p cnt=%i idx=%i len=%zu\n",
1212 srb->segment_x, srb->sg_count, srb->sg_index,
1213 srb->total_xfer_length);
1214 printk(" state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n",
1215 srb->state, srb->status, srb->scsi_phase,
1216 (acb->active_dcb) ? "" : "not");
1217 }
1218 dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x "
1219 "signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x "
1220 "rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x "
1221 "config2=0x%02x cmd=0x%02x selto=0x%02x}\n",
1222 DC395x_read16(acb, TRM_S1040_SCSI_STATUS),
1223 DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
1224 DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL),
1225 DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS),
1226 DC395x_read8(acb, TRM_S1040_SCSI_SYNC),
1227 DC395x_read8(acb, TRM_S1040_SCSI_TARGETID),
1228 DC395x_read8(acb, TRM_S1040_SCSI_IDMSG),
1229 DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
1230 DC395x_read8(acb, TRM_S1040_SCSI_INTEN),
1231 DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0),
1232 DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2),
1233 DC395x_read8(acb, TRM_S1040_SCSI_COMMAND),
1234 DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT));
1235 dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x "
1236 "irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x "
1237 "ctctr=0x%08x addr=0x%08x:0x%08x}\n",
1238 DC395x_read16(acb, TRM_S1040_DMA_COMMAND),
1239 DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
1240 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
1241 DC395x_read8(acb, TRM_S1040_DMA_STATUS),
1242 DC395x_read8(acb, TRM_S1040_DMA_INTEN),
1243 DC395x_read16(acb, TRM_S1040_DMA_CONFIG),
1244 DC395x_read32(acb, TRM_S1040_DMA_XCNT),
1245 DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
1246 DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR),
1247 DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR));
1248 dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} "
1249 "pci{status=0x%04x}\n",
1250 DC395x_read8(acb, TRM_S1040_GEN_CONTROL),
1251 DC395x_read8(acb, TRM_S1040_GEN_STATUS),
1252 DC395x_read8(acb, TRM_S1040_GEN_TIMER),
1253 pstat);
1254}
1255
1256
1257static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt)
1258{
1259#if debug_enabled(DBG_FIFO)
1260 u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
1261 u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
1262 if (!(fifocnt & 0x40))
1263 dprintkdbg(DBG_FIFO,
1264 "clear_fifo: (%i bytes) on phase %02x in %s\n",
1265 fifocnt & 0x3f, lines, txt);
1266#endif
1267 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO);
1268}
1269
1270
1271static void reset_dev_param(struct AdapterCtlBlk *acb)
1272{
1273 struct DeviceCtlBlk *dcb;
1274 struct NvRamType *eeprom = &acb->eeprom;
1275 dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb);
1276
1277 list_for_each_entry(dcb, &acb->dcb_list, list) {
1278 u8 period_index;
1279
1280 dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE);
1281 dcb->sync_period = 0;
1282 dcb->sync_offset = 0;
1283
1284 dcb->dev_mode = eeprom->target[dcb->target_id].cfg0;
1285 period_index = eeprom->target[dcb->target_id].period & 0x07;
1286 dcb->min_nego_period = clock_period[period_index];
1287 if (!(dcb->dev_mode & NTC_DO_WIDE_NEGO)
1288 || !(acb->config & HCC_WIDE_CARD))
1289 dcb->sync_mode &= ~WIDE_NEGO_ENABLE;
1290 }
1291}
1292
1293
1294
1295
1296
1297
1298
1299static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
1300{
1301 struct AdapterCtlBlk *acb =
1302 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
1303 dprintkl(KERN_INFO,
1304 "eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n",
1305 cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
1306
1307 if (timer_pending(&acb->waiting_timer))
1308 del_timer(&acb->waiting_timer);
1309
1310
1311
1312
1313 DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
1314 DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
1315 DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
1316 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
1317
1318 reset_scsi_bus(acb);
1319 udelay(500);
1320
1321
1322 acb->last_reset =
1323 jiffies + 3 * HZ / 2 +
1324 HZ * acb->eeprom.delay_time;
1325
1326
1327
1328
1329
1330 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
1331 clear_fifo(acb, "eh_bus_reset");
1332
1333 DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
1334 set_basic_config(acb);
1335
1336 reset_dev_param(acb);
1337 doing_srb_done(acb, DID_RESET, cmd, 0);
1338 acb->active_dcb = NULL;
1339 acb->acb_flag = 0;
1340 waiting_process_next(acb);
1341
1342 return SUCCESS;
1343}
1344
1345static int dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
1346{
1347 int rc;
1348
1349 spin_lock_irq(cmd->device->host->host_lock);
1350 rc = __dc395x_eh_bus_reset(cmd);
1351 spin_unlock_irq(cmd->device->host->host_lock);
1352
1353 return rc;
1354}
1355
1356
1357
1358
1359
1360
1361static int dc395x_eh_abort(struct scsi_cmnd *cmd)
1362{
1363
1364
1365
1366
1367 struct AdapterCtlBlk *acb =
1368 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
1369 struct DeviceCtlBlk *dcb;
1370 struct ScsiReqBlk *srb;
1371 dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n",
1372 cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
1373
1374 dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
1375 if (!dcb) {
1376 dprintkl(KERN_DEBUG, "eh_abort: No such device\n");
1377 return FAILED;
1378 }
1379
1380 srb = find_cmd(cmd, &dcb->srb_waiting_list);
1381 if (srb) {
1382 srb_waiting_remove(dcb, srb);
1383 pci_unmap_srb_sense(acb, srb);
1384 pci_unmap_srb(acb, srb);
1385 free_tag(dcb, srb);
1386 srb_free_insert(acb, srb);
1387 dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
1388 cmd->result = DID_ABORT << 16;
1389 return SUCCESS;
1390 }
1391 srb = find_cmd(cmd, &dcb->srb_going_list);
1392 if (srb) {
1393 dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n");
1394
1395 } else {
1396 dprintkl(KERN_DEBUG, "eh_abort: Command not found\n");
1397 }
1398 return FAILED;
1399}
1400
1401
1402
1403static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
1404 struct ScsiReqBlk *srb)
1405{
1406 u8 *ptr = srb->msgout_buf + srb->msg_count;
1407 if (srb->msg_count > 1) {
1408 dprintkl(KERN_INFO,
1409 "build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n",
1410 srb->msg_count, srb->msgout_buf[0],
1411 srb->msgout_buf[1]);
1412 return;
1413 }
1414 if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
1415 dcb->sync_offset = 0;
1416 dcb->min_nego_period = 200 >> 2;
1417 } else if (dcb->sync_offset == 0)
1418 dcb->sync_offset = SYNC_NEGO_OFFSET;
1419
1420 *ptr++ = MSG_EXTENDED;
1421 *ptr++ = 3;
1422 *ptr++ = EXTENDED_SDTR;
1423 *ptr++ = dcb->min_nego_period;
1424 *ptr++ = dcb->sync_offset;
1425 srb->msg_count += 5;
1426 srb->state |= SRB_DO_SYNC_NEGO;
1427}
1428
1429
1430
1431static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
1432 struct ScsiReqBlk *srb)
1433{
1434 u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
1435 (acb->config & HCC_WIDE_CARD)) ? 1 : 0;
1436 u8 *ptr = srb->msgout_buf + srb->msg_count;
1437 if (srb->msg_count > 1) {
1438 dprintkl(KERN_INFO,
1439 "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n",
1440 srb->msg_count, srb->msgout_buf[0],
1441 srb->msgout_buf[1]);
1442 return;
1443 }
1444 *ptr++ = MSG_EXTENDED;
1445 *ptr++ = 2;
1446 *ptr++ = EXTENDED_WDTR;
1447 *ptr++ = wide;
1448 srb->msg_count += 4;
1449 srb->state |= SRB_DO_WIDE_NEGO;
1450}
1451
1452
1453#if 0
1454
1455
1456void selection_timeout_missed(unsigned long ptr);
1457
1458static void selto_timer(struct AdapterCtlBlk *acb)
1459{
1460 if (timer_pending(&acb->selto_timer))
1461 return;
1462 acb->selto_timer.function = selection_timeout_missed;
1463 acb->selto_timer.data = (unsigned long) acb;
1464 if (time_before
1465 (jiffies + HZ, acb->last_reset + HZ / 2))
1466 acb->selto_timer.expires =
1467 acb->last_reset + HZ / 2 + 1;
1468 else
1469 acb->selto_timer.expires = jiffies + HZ + 1;
1470 add_timer(&acb->selto_timer);
1471}
1472
1473
1474void selection_timeout_missed(unsigned long ptr)
1475{
1476 unsigned long flags;
1477 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
1478 struct ScsiReqBlk *srb;
1479 dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n");
1480 if (!acb->active_dcb || !acb->active_dcb->active_srb) {
1481 dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n");
1482 return;
1483 }
1484 DC395x_LOCK_IO(acb->scsi_host, flags);
1485 srb = acb->active_dcb->active_srb;
1486 disconnect(acb);
1487 DC395x_UNLOCK_IO(acb->scsi_host, flags);
1488}
1489#endif
1490
1491
1492static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1493 struct ScsiReqBlk* srb)
1494{
1495 u16 s_stat2, return_code;
1496 u8 s_stat, scsicommand, i, identify_message;
1497 u8 *ptr;
1498 dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
1499 dcb->target_id, dcb->target_lun, srb);
1500
1501 srb->tag_number = TAG_NONE;
1502
1503 s_stat = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
1504 s_stat2 = 0;
1505 s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
1506#if 1
1507 if (s_stat & 0x20 ) {
1508 dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n",
1509 s_stat, s_stat2);
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521 return 1;
1522 }
1523#endif
1524 if (acb->active_dcb) {
1525 dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a"
1526 "command while another command (0x%p) is active.",
1527 srb->cmd,
1528 acb->active_dcb->active_srb ?
1529 acb->active_dcb->active_srb->cmd : 0);
1530 return 1;
1531 }
1532 if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
1533 dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd);
1534 return 1;
1535 }
1536
1537
1538 if (time_before(jiffies, acb->last_reset - HZ / 2)) {
1539 dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
1540 return 1;
1541 }
1542
1543
1544 clear_fifo(acb, "start_scsi");
1545 DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
1546 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
1547 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
1548 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
1549 srb->scsi_phase = PH_BUS_FREE;
1550
1551 identify_message = dcb->identify_msg;
1552
1553
1554 if (srb->flag & AUTO_REQSENSE)
1555 identify_message &= 0xBF;
1556
1557 if (((srb->cmd->cmnd[0] == INQUIRY)
1558 || (srb->cmd->cmnd[0] == REQUEST_SENSE)
1559 || (srb->flag & AUTO_REQSENSE))
1560 && (((dcb->sync_mode & WIDE_NEGO_ENABLE)
1561 && !(dcb->sync_mode & WIDE_NEGO_DONE))
1562 || ((dcb->sync_mode & SYNC_NEGO_ENABLE)
1563 && !(dcb->sync_mode & SYNC_NEGO_DONE)))
1564 && (dcb->target_lun == 0)) {
1565 srb->msgout_buf[0] = identify_message;
1566 srb->msg_count = 1;
1567 scsicommand = SCMD_SEL_ATNSTOP;
1568 srb->state = SRB_MSGOUT;
1569#ifndef SYNC_FIRST
1570 if (dcb->sync_mode & WIDE_NEGO_ENABLE
1571 && dcb->inquiry7 & SCSI_INQ_WBUS16) {
1572 build_wdtr(acb, dcb, srb);
1573 goto no_cmd;
1574 }
1575#endif
1576 if (dcb->sync_mode & SYNC_NEGO_ENABLE
1577 && dcb->inquiry7 & SCSI_INQ_SYNC) {
1578 build_sdtr(acb, dcb, srb);
1579 goto no_cmd;
1580 }
1581 if (dcb->sync_mode & WIDE_NEGO_ENABLE
1582 && dcb->inquiry7 & SCSI_INQ_WBUS16) {
1583 build_wdtr(acb, dcb, srb);
1584 goto no_cmd;
1585 }
1586 srb->msg_count = 0;
1587 }
1588
1589 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, identify_message);
1590
1591 scsicommand = SCMD_SEL_ATN;
1592 srb->state = SRB_START_;
1593#ifndef DC395x_NO_TAGQ
1594 if ((dcb->sync_mode & EN_TAG_QUEUEING)
1595 && (identify_message & 0xC0)) {
1596
1597 u32 tag_mask = 1;
1598 u8 tag_number = 0;
1599 while (tag_mask & dcb->tag_mask
1600 && tag_number < dcb->max_command) {
1601 tag_mask = tag_mask << 1;
1602 tag_number++;
1603 }
1604 if (tag_number >= dcb->max_command) {
1605 dprintkl(KERN_WARNING, "start_scsi: (0x%p) "
1606 "Out of tags target=<%02i-%i>)\n",
1607 srb->cmd, srb->cmd->device->id,
1608 (u8)srb->cmd->device->lun);
1609 srb->state = SRB_READY;
1610 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
1611 DO_HWRESELECT);
1612 return 1;
1613 }
1614
1615 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_SIMPLE_QTAG);
1616 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number);
1617 dcb->tag_mask |= tag_mask;
1618 srb->tag_number = tag_number;
1619 scsicommand = SCMD_SEL_ATN3;
1620 srb->state = SRB_START_;
1621 }
1622#endif
1623
1624
1625 dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n",
1626 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
1627 srb->cmd->cmnd[0], srb->tag_number);
1628 if (srb->flag & AUTO_REQSENSE) {
1629 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
1630 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
1631 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1632 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1633 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
1634 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1635 } else {
1636 ptr = (u8 *)srb->cmd->cmnd;
1637 for (i = 0; i < srb->cmd->cmd_len; i++)
1638 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
1639 }
1640 no_cmd:
1641 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
1642 DO_HWRESELECT | DO_DATALATCH);
1643 if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
1644
1645
1646
1647
1648
1649 dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n",
1650 srb->cmd, dcb->target_id, dcb->target_lun);
1651 srb->state = SRB_READY;
1652 free_tag(dcb, srb);
1653 srb->msg_count = 0;
1654 return_code = 1;
1655
1656 } else {
1657
1658
1659
1660
1661 srb->scsi_phase = PH_BUS_FREE;
1662 dcb->active_srb = srb;
1663 acb->active_dcb = dcb;
1664 return_code = 0;
1665
1666 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
1667 DO_DATALATCH | DO_HWRESELECT);
1668
1669 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, scsicommand);
1670 }
1671 return return_code;
1672}
1673
1674
1675#define DC395x_ENABLE_MSGOUT \
1676 DC395x_write16 (acb, TRM_S1040_SCSI_CONTROL, DO_SETATN); \
1677 srb->state |= SRB_MSGOUT
1678
1679
1680
1681static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
1682 struct ScsiReqBlk *srb)
1683{
1684 srb->msgout_buf[0] = ABORT;
1685 srb->msg_count = 1;
1686 DC395x_ENABLE_MSGOUT;
1687 srb->state &= ~SRB_MSGIN;
1688 srb->state |= SRB_MSGOUT;
1689}
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
1700 u16 scsi_status)
1701{
1702 struct DeviceCtlBlk *dcb;
1703 struct ScsiReqBlk *srb;
1704 u16 phase;
1705 u8 scsi_intstatus;
1706 unsigned long flags;
1707 void (*dc395x_statev)(struct AdapterCtlBlk *, struct ScsiReqBlk *,
1708 u16 *);
1709
1710 DC395x_LOCK_IO(acb->scsi_host, flags);
1711
1712
1713 scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
1714 if ((scsi_status & 0x2007) == 0x2002)
1715 dprintkl(KERN_DEBUG,
1716 "COP after COP completed? %04x\n", scsi_status);
1717 if (debug_enabled(DBG_KG)) {
1718 if (scsi_intstatus & INT_SELTIMEOUT)
1719 dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n");
1720 }
1721
1722
1723 if (timer_pending(&acb->selto_timer))
1724 del_timer(&acb->selto_timer);
1725
1726 if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) {
1727 disconnect(acb);
1728 goto out_unlock;
1729 }
1730 if (scsi_intstatus & INT_RESELECTED) {
1731 reselect(acb);
1732 goto out_unlock;
1733 }
1734 if (scsi_intstatus & INT_SELECT) {
1735 dprintkl(KERN_INFO, "Host does not support target mode!\n");
1736 goto out_unlock;
1737 }
1738 if (scsi_intstatus & INT_SCSIRESET) {
1739 scsi_reset_detect(acb);
1740 goto out_unlock;
1741 }
1742 if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
1743 dcb = acb->active_dcb;
1744 if (!dcb) {
1745 dprintkl(KERN_DEBUG,
1746 "Oops: BusService (%04x %02x) w/o ActiveDCB!\n",
1747 scsi_status, scsi_intstatus);
1748 goto out_unlock;
1749 }
1750 srb = dcb->active_srb;
1751 if (dcb->flag & ABORT_DEV_) {
1752 dprintkdbg(DBG_0, "MsgOut Abort Device.....\n");
1753 enable_msgout_abort(acb, srb);
1754 }
1755
1756
1757 phase = (u16)srb->scsi_phase;
1758
1759
1760
1761
1762
1763
1764
1765
1766
1767
1768
1769
1770
1771
1772 dc395x_statev = dc395x_scsi_phase0[phase];
1773 dc395x_statev(acb, srb, &scsi_status);
1774
1775
1776
1777
1778
1779
1780 srb->scsi_phase = scsi_status & PHASEMASK;
1781 phase = (u16)scsi_status & PHASEMASK;
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795 dc395x_statev = dc395x_scsi_phase1[phase];
1796 dc395x_statev(acb, srb, &scsi_status);
1797 }
1798 out_unlock:
1799 DC395x_UNLOCK_IO(acb->scsi_host, flags);
1800}
1801
1802
1803static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
1804{
1805 struct AdapterCtlBlk *acb = dev_id;
1806 u16 scsi_status;
1807 u8 dma_status;
1808 irqreturn_t handled = IRQ_NONE;
1809
1810
1811
1812
1813 scsi_status = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
1814 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
1815 if (scsi_status & SCSIINTERRUPT) {
1816
1817 dc395x_handle_interrupt(acb, scsi_status);
1818 handled = IRQ_HANDLED;
1819 }
1820 else if (dma_status & 0x20) {
1821
1822 dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status);
1823#if 0
1824 dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n");
1825 if (acb->active_dcb) {
1826 acb->active_dcb-> flag |= ABORT_DEV_;
1827 if (acb->active_dcb->active_srb)
1828 enable_msgout_abort(acb, acb->active_dcb->active_srb);
1829 }
1830 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO);
1831#else
1832 dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n");
1833 acb = NULL;
1834#endif
1835 handled = IRQ_HANDLED;
1836 }
1837
1838 return handled;
1839}
1840
1841
1842static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1843 u16 *pscsi_status)
1844{
1845 dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd);
1846 if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
1847 *pscsi_status = PH_BUS_FREE;
1848
1849 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1850 srb->state &= ~SRB_MSGOUT;
1851}
1852
1853
1854static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1855 u16 *pscsi_status)
1856{
1857 u16 i;
1858 u8 *ptr;
1859 dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd);
1860
1861 clear_fifo(acb, "msgout_phase1");
1862 if (!(srb->state & SRB_MSGOUT)) {
1863 srb->state |= SRB_MSGOUT;
1864 dprintkl(KERN_DEBUG,
1865 "msgout_phase1: (0x%p) Phase unexpected\n",
1866 srb->cmd);
1867 }
1868 if (!srb->msg_count) {
1869 dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n",
1870 srb->cmd);
1871 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP);
1872 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1873 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
1874 return;
1875 }
1876 ptr = (u8 *)srb->msgout_buf;
1877 for (i = 0; i < srb->msg_count; i++)
1878 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
1879 srb->msg_count = 0;
1880 if (srb->msgout_buf[0] == MSG_ABORT)
1881 srb->state = SRB_ABORT_SENT;
1882
1883 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
1884}
1885
1886
1887static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1888 u16 *pscsi_status)
1889{
1890 dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd);
1891 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1892}
1893
1894
1895static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1896 u16 *pscsi_status)
1897{
1898 struct DeviceCtlBlk *dcb;
1899 u8 *ptr;
1900 u16 i;
1901 dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd);
1902
1903 clear_fifo(acb, "command_phase1");
1904 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN);
1905 if (!(srb->flag & AUTO_REQSENSE)) {
1906 ptr = (u8 *)srb->cmd->cmnd;
1907 for (i = 0; i < srb->cmd->cmd_len; i++) {
1908 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr);
1909 ptr++;
1910 }
1911 } else {
1912 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
1913 dcb = acb->active_dcb;
1914
1915 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
1916 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1917 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1918 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
1919 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1920 }
1921 srb->state |= SRB_COMMAND;
1922
1923 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1924
1925 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
1926}
1927
1928
1929
1930
1931
1932
1933static void sg_verify_length(struct ScsiReqBlk *srb)
1934{
1935 if (debug_enabled(DBG_SG)) {
1936 unsigned len = 0;
1937 unsigned idx = srb->sg_index;
1938 struct SGentry *psge = srb->segment_x + idx;
1939 for (; idx < srb->sg_count; psge++, idx++)
1940 len += psge->length;
1941 if (len != srb->total_xfer_length)
1942 dprintkdbg(DBG_SG,
1943 "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n",
1944 srb->total_xfer_length, len);
1945 }
1946}
1947
1948
1949
1950
1951
1952
1953static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
1954{
1955 u8 idx;
1956 u32 xferred = srb->total_xfer_length - left;
1957 struct SGentry *psge = srb->segment_x + srb->sg_index;
1958
1959 dprintkdbg(DBG_0,
1960 "sg_update_list: Transferred %i of %i bytes, %i remain\n",
1961 xferred, srb->total_xfer_length, left);
1962 if (xferred == 0) {
1963
1964 return;
1965 }
1966
1967 sg_verify_length(srb);
1968 srb->total_xfer_length = left;
1969 for (idx = srb->sg_index; idx < srb->sg_count; idx++) {
1970 if (xferred >= psge->length) {
1971
1972 xferred -= psge->length;
1973 } else {
1974
1975 psge->length -= xferred;
1976 psge->address += xferred;
1977 srb->sg_index = idx;
1978 pci_dma_sync_single_for_device(srb->dcb->
1979 acb->dev,
1980 srb->sg_bus_addr,
1981 SEGMENTX_LEN,
1982 PCI_DMA_TODEVICE);
1983 break;
1984 }
1985 psge++;
1986 }
1987 sg_verify_length(srb);
1988}
1989
1990
1991
1992
1993
1994
1995
1996
1997static void sg_subtract_one(struct ScsiReqBlk *srb)
1998{
1999 sg_update_list(srb, srb->total_xfer_length - 1);
2000}
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
2012 struct ScsiReqBlk *srb)
2013{
2014
2015 if (DC395x_read16(acb, TRM_S1040_DMA_COMMAND) & 0x0001) {
2016 if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
2017 clear_fifo(acb, "cleanup/in");
2018 if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
2019 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
2020 } else {
2021 if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
2022 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
2023 if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
2024 clear_fifo(acb, "cleanup/out");
2025 }
2026 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2027}
2028
2029
2030
2031
2032
2033
2034#define DC395x_LASTPIO 4
2035
2036
2037static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2038 u16 *pscsi_status)
2039{
2040 struct DeviceCtlBlk *dcb = srb->dcb;
2041 u16 scsi_status = *pscsi_status;
2042 u32 d_left_counter = 0;
2043 dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n",
2044 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2045
2046
2047
2048
2049
2050
2051
2052
2053
2054
2055
2056
2057
2058 dprintkdbg(DBG_PIO, "data_out_phase0: "
2059 "DMA{fifocnt=0x%02x fifostat=0x%02x} "
2060 "SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n",
2061 DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
2062 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
2063 DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
2064 DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status,
2065 srb->total_xfer_length);
2066 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO);
2067
2068 if (!(srb->state & SRB_XFERPAD)) {
2069 if (scsi_status & PARITYERROR)
2070 srb->status |= PARITY_ERROR;
2071
2072
2073
2074
2075
2076
2077
2078 if (!(scsi_status & SCSIXFERDONE)) {
2079
2080
2081
2082
2083 d_left_counter =
2084 (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
2085 0x1F);
2086 if (dcb->sync_period & WIDE_SYNC)
2087 d_left_counter <<= 1;
2088
2089 dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n"
2090 "SCSI{fifocnt=0x%02x cnt=0x%08x} "
2091 "DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n",
2092 DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
2093 (dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
2094 DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
2095 DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
2096 DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
2097 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
2098 DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
2099 }
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109 if (srb->total_xfer_length > DC395x_LASTPIO)
2110 d_left_counter +=
2111 DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
2112
2113
2114
2115
2116 if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
2117 && scsi_bufflen(srb->cmd) % 2) {
2118 d_left_counter = 0;
2119 dprintkl(KERN_INFO,
2120 "data_out_phase0: Discard 1 byte (0x%02x)\n",
2121 scsi_status);
2122 }
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133 if (d_left_counter == 0) {
2134 srb->total_xfer_length = 0;
2135 } else {
2136
2137
2138
2139
2140
2141 long oldxferred =
2142 srb->total_xfer_length - d_left_counter;
2143 const int diff =
2144 (dcb->sync_period & WIDE_SYNC) ? 2 : 1;
2145 sg_update_list(srb, d_left_counter);
2146
2147 if ((srb->segment_x[srb->sg_index].length ==
2148 diff && scsi_sg_count(srb->cmd))
2149 || ((oldxferred & ~PAGE_MASK) ==
2150 (PAGE_SIZE - diff))
2151 ) {
2152 dprintkl(KERN_INFO, "data_out_phase0: "
2153 "Work around chip bug (%i)?\n", diff);
2154 d_left_counter =
2155 srb->total_xfer_length - diff;
2156 sg_update_list(srb, d_left_counter);
2157
2158
2159
2160
2161 }
2162 }
2163 }
2164 if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) {
2165 cleanup_after_transfer(acb, srb);
2166 }
2167}
2168
2169
2170static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2171 u16 *pscsi_status)
2172{
2173 dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n",
2174 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2175 clear_fifo(acb, "data_out_phase1");
2176
2177 data_io_transfer(acb, srb, XFERDATAOUT);
2178}
2179
2180static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2181 u16 *pscsi_status)
2182{
2183 u16 scsi_status = *pscsi_status;
2184
2185 dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n",
2186 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
2199
2200
2201 if (!(srb->state & SRB_XFERPAD)) {
2202 u32 d_left_counter;
2203 unsigned int sc, fc;
2204
2205 if (scsi_status & PARITYERROR) {
2206 dprintkl(KERN_INFO, "data_in_phase0: (0x%p) "
2207 "Parity Error\n", srb->cmd);
2208 srb->status |= PARITY_ERROR;
2209 }
2210
2211
2212
2213
2214
2215
2216 if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) {
2217#if 0
2218 int ctr = 6000000;
2219 dprintkl(KERN_DEBUG,
2220 "DIP0: Wait for DMA FIFO to flush ...\n");
2221
2222
2223
2224 while (!
2225 (DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) &
2226 0x80) && --ctr);
2227 if (ctr < 6000000 - 1)
2228 dprintkl(KERN_DEBUG
2229 "DIP0: Had to wait for DMA ...\n");
2230 if (!ctr)
2231 dprintkl(KERN_ERR,
2232 "Deadlock in DIP0 waiting for DMA FIFO empty!!\n");
2233
2234#endif
2235 dprintkdbg(DBG_KG, "data_in_phase0: "
2236 "DMA{fifocnt=0x%02x fifostat=0x%02x}\n",
2237 DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
2238 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT));
2239 }
2240
2241 sc = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
2242 fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
2243 d_left_counter = sc + ((fc & 0x1f)
2244 << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
2245 0));
2246 dprintkdbg(DBG_KG, "data_in_phase0: "
2247 "SCSI{fifocnt=0x%02x%s ctr=0x%08x} "
2248 "DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} "
2249 "Remain{totxfer=%i scsi_fifo+ctr=%i}\n",
2250 fc,
2251 (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
2252 sc,
2253 fc,
2254 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
2255 DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
2256 srb->total_xfer_length, d_left_counter);
2257#if DC395x_LASTPIO
2258
2259 if (d_left_counter
2260 && srb->total_xfer_length <= DC395x_LASTPIO) {
2261 size_t left_io = srb->total_xfer_length;
2262
2263
2264
2265 dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) "
2266 "for remaining %i bytes:",
2267 fc & 0x1f,
2268 (srb->dcb->sync_period & WIDE_SYNC) ?
2269 "words" : "bytes",
2270 srb->total_xfer_length);
2271 if (srb->dcb->sync_period & WIDE_SYNC)
2272 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
2273 CFG2_WIDEFIFO);
2274 while (left_io) {
2275 unsigned char *virt, *base = NULL;
2276 unsigned long flags = 0;
2277 size_t len = left_io;
2278 size_t offset = srb->request_length - left_io;
2279
2280 local_irq_save(flags);
2281
2282
2283 base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
2284 srb->sg_count, &offset, &len);
2285 virt = base + offset;
2286
2287 left_io -= len;
2288
2289 while (len) {
2290 u8 byte;
2291 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2292 *virt++ = byte;
2293
2294 if (debug_enabled(DBG_PIO))
2295 printk(" %02x", byte);
2296
2297 d_left_counter--;
2298 sg_subtract_one(srb);
2299
2300 len--;
2301
2302 fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
2303
2304 if (fc == 0x40) {
2305 left_io = 0;
2306 break;
2307 }
2308 }
2309
2310 WARN_ON((fc != 0x40) == !d_left_counter);
2311
2312 if (fc == 0x40 && (srb->dcb->sync_period & WIDE_SYNC)) {
2313
2314 if (srb->total_xfer_length > 0) {
2315 u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2316
2317 *virt++ = byte;
2318 srb->total_xfer_length--;
2319 if (debug_enabled(DBG_PIO))
2320 printk(" %02x", byte);
2321 }
2322
2323 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
2324 }
2325
2326 scsi_kunmap_atomic_sg(base);
2327 local_irq_restore(flags);
2328 }
2329
2330
2331 if (debug_enabled(DBG_PIO))
2332 printk("\n");
2333 }
2334#endif
2335
2336#if 0
2337
2338
2339
2340
2341 if (!(scsi_status & SCSIXFERDONE)) {
2342
2343
2344
2345
2346 d_left_counter =
2347 (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
2348 0x1F);
2349 if (srb->dcb->sync_period & WIDE_SYNC)
2350 d_left_counter <<= 1;
2351
2352
2353
2354
2355
2356 }
2357#endif
2358
2359 if (d_left_counter == 0
2360 || (scsi_status & SCSIXFERCNT_2_ZERO)) {
2361#if 0
2362 int ctr = 6000000;
2363 u8 TempDMAstatus;
2364 do {
2365 TempDMAstatus =
2366 DC395x_read8(acb, TRM_S1040_DMA_STATUS);
2367 } while (!(TempDMAstatus & DMAXFERCOMP) && --ctr);
2368 if (!ctr)
2369 dprintkl(KERN_ERR,
2370 "Deadlock in DataInPhase0 waiting for DMA!!\n");
2371 srb->total_xfer_length = 0;
2372#endif
2373 srb->total_xfer_length = d_left_counter;
2374 } else {
2375
2376
2377
2378
2379
2380
2381
2382
2383 sg_update_list(srb, d_left_counter);
2384 }
2385 }
2386
2387 if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) {
2388 cleanup_after_transfer(acb, srb);
2389 }
2390}
2391
2392
2393static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2394 u16 *pscsi_status)
2395{
2396 dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n",
2397 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2398 data_io_transfer(acb, srb, XFERDATAIN);
2399}
2400
2401
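/*
 * data_io_transfer - program the chip for the next chunk of a transfer.
 * Transfers larger than DC395x_LASTPIO bytes are handed to the DMA
 * engine, either through the mapped scatter/gather list or as a single
 * segment; short remainders are moved by PIO through the SCSI FIFO; a
 * zero-length request is padded with dummy FIFO bytes and flagged as an
 * over/under run. (Summary inferred from the code below.)
 */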
2402static void data_io_transfer(struct AdapterCtlBlk *acb,
2403 struct ScsiReqBlk *srb, u16 io_dir)
2404{
2405 struct DeviceCtlBlk *dcb = srb->dcb;
2406 u8 bval;
2407 dprintkdbg(DBG_0,
2408 "data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
2409 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
2410 ((io_dir & DMACMD_DIR) ? 'r' : 'w'),
2411 srb->total_xfer_length, srb->sg_index, srb->sg_count);
2412 if (srb == acb->tmp_srb)
2413 dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n");
2414 if (srb->sg_index >= srb->sg_count) {
2415
2416 return;
2417 }
2418
2419 if (srb->total_xfer_length > DC395x_LASTPIO) {
2420 u8 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
2421
2422
2423
2424
2425 if (dma_status & XFERPENDING) {
2426 dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! "
2427 "Expect trouble!\n");
2428 dump_register_info(acb, dcb, srb);
2429 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
2430 }
2431
2432
2433
2434
2435
2436 srb->state |= SRB_DATA_XFER;
2437 DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0);
2438 if (scsi_sg_count(srb->cmd)) {
2439 io_dir |= DMACMD_SG;
2440 DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
2441 srb->sg_bus_addr +
2442 sizeof(struct SGentry) *
2443 srb->sg_index);
2444
2445 DC395x_write32(acb, TRM_S1040_DMA_XCNT,
2446 ((u32)(srb->sg_count -
2447 srb->sg_index) << 3));
2448 } else {
2449 io_dir &= ~DMACMD_SG;
2450 DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
2451 srb->segment_x[0].address);
2452 DC395x_write32(acb, TRM_S1040_DMA_XCNT,
2453 srb->segment_x[0].length);
2454 }
2455
2456 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
2457 srb->total_xfer_length);
2458 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2459 if (io_dir & DMACMD_DIR) {
2460 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
2461 SCMD_DMA_IN);
2462 DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
2463 } else {
2464 DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
2465 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
2466 SCMD_DMA_OUT);
2467 }
2468
2469 }
2470#if DC395x_LASTPIO
2471 else if (srb->total_xfer_length > 0) {
2472
2473
2474
2475
2476 srb->state |= SRB_DATA_XFER;
2477
2478 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
2479 srb->total_xfer_length);
2480 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2481 if (io_dir & DMACMD_DIR) {
2482 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
2483 SCMD_FIFO_IN);
2484 } else {
2485 int ln = srb->total_xfer_length;
2486 size_t left_io = srb->total_xfer_length;
2487
2488 if (srb->dcb->sync_period & WIDE_SYNC)
2489 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
2490 CFG2_WIDEFIFO);
2491
2492 while (left_io) {
2493 unsigned char *virt, *base = NULL;
2494 unsigned long flags = 0;
2495 size_t len = left_io;
2496 size_t offset = srb->request_length - left_io;
2497
2498 local_irq_save(flags);
2499
2500 base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
2501 srb->sg_count, &offset, &len);
2502 virt = base + offset;
2503
2504 left_io -= len;
2505
2506 while (len--) {
2507 if (debug_enabled(DBG_PIO))
2508 printk(" %02x", *virt);
2509
2510 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *virt++);
2511
2512 sg_subtract_one(srb);
2513 }
2514
2515 scsi_kunmap_atomic_sg(base);
2516 local_irq_restore(flags);
2517 }
2518 if (srb->dcb->sync_period & WIDE_SYNC) {
2519 if (ln % 2) {
2520 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
2521 if (debug_enabled(DBG_PIO))
2522 printk(" |00");
2523 }
2524 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
2525 }
2526
2527 if (debug_enabled(DBG_PIO))
2528 printk("\n");
2529 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
2530 SCMD_FIFO_OUT);
2531 }
2532 }
2533#endif
2534 else {
2535 u8 data = 0, data2 = 0;
2536 if (srb->sg_count) {
2537 srb->adapter_status = H_OVER_UNDER_RUN;
2538 srb->status |= OVER_RUN;
2539 }
2540
2541
2542
2543
2544
2545 if (dcb->sync_period & WIDE_SYNC) {
2546 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 2);
2547 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
2548 CFG2_WIDEFIFO);
2549 if (io_dir & DMACMD_DIR) {
2550 data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2551 data2 = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2552 } else {
2553
2554
2555
2556 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
2557 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'G');
2558 }
2559 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
2560 } else {
2561 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
2562
2563
2564 if (io_dir & DMACMD_DIR)
2565 data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2566 else
2567 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
2568 }
2569 srb->state |= SRB_XFERPAD;
2570 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2571
2572 bval = (io_dir & DMACMD_DIR) ? SCMD_FIFO_IN : SCMD_FIFO_OUT;
2573 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, bval);
2574 }
2575}
2576
2577
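/*
 * status_phase0 - read the status byte and the byte that follows it from
 * the SCSI FIFO into target_status/end_message, mark the SRB completed
 * and acknowledge the message. The phase is forced to PH_BUS_FREE,
 * presumably so the interrupt handler does not dispatch another phase
 * routine for this command.
 */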
2578static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2579 u16 *pscsi_status)
2580{
2581 dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n",
2582 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2583 srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2584 srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2585 srb->state = SRB_COMPLETED;
2586 *pscsi_status = PH_BUS_FREE;
2587 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2588 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
2589}
2590
2591
2592static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2593 u16 *pscsi_status)
2594{
2595 dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n",
2596 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2597 srb->state = SRB_STATUS;
2598 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2599 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
2600}
2601
2602
2603
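/*
 * msgin_completed - return non-zero once the collected bytes form a
 * complete message: extended messages need the length announced in their
 * second byte, two-byte messages (0x20-0x2f) need both bytes, everything
 * else is complete after a single byte.
 */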
2604static inline u8 msgin_completed(u8 * msgbuf, u32 len)
2605{
2606 if (*msgbuf == EXTENDED_MESSAGE) {
2607 if (len < 2)
2608 return 0;
2609 if (len < msgbuf[1] + 2)
2610 return 0;
2611 } else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f)
2612 if (len < 2)
2613 return 0;
2614 return 1;
2615}
2616
2617
2618static inline void msgin_reject(struct AdapterCtlBlk *acb,
2619 struct ScsiReqBlk *srb)
2620{
2621 srb->msgout_buf[0] = MESSAGE_REJECT;
2622 srb->msg_count = 1;
2623 DC395x_ENABLE_MSGOUT;
2624 srb->state &= ~SRB_MSGIN;
2625 srb->state |= SRB_MSGOUT;
2626 dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n",
2627 srb->msgin_buf[0],
2628 srb->dcb->target_id, srb->dcb->target_lun);
2629}
2630
2631
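/*
 * msgin_qtag - find the disconnected SRB that matches a reselection tag.
 * If the tag is not known for this device, or the SRB found is not in
 * the disconnected state, fall back to the temporary SRB and answer with
 * an ABORT TAG message.
 */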
2632static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
2633 struct DeviceCtlBlk *dcb, u8 tag)
2634{
2635 struct ScsiReqBlk *srb = NULL;
2636 struct ScsiReqBlk *i;
2637 /* srb is still NULL at this point; only the tag can be traced safely */
2638 dprintkdbg(DBG_0, "msgin_qtag: tag=%i\n", tag);
2639
2640 if (!(dcb->tag_mask & (1 << tag)))
2641 dprintkl(KERN_DEBUG,
2642 "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n",
2643 dcb->tag_mask, tag);
2644
2645 if (list_empty(&dcb->srb_going_list))
2646 goto mingx0;
2647 list_for_each_entry(i, &dcb->srb_going_list, list) {
2648 if (i->tag_number == tag) {
2649 srb = i;
2650 break;
2651 }
2652 }
2653 if (!srb)
2654 goto mingx0;
2655
2656 dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n",
2657 srb->cmd, srb->dcb->target_id, srb->dcb->target_lun);
2658 if (dcb->flag & ABORT_DEV_) {
2659
2660 enable_msgout_abort(acb, srb);
2661 }
2662
2663 if (!(srb->state & SRB_DISCONNECT))
2664 goto mingx0;
2665
2666 memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len);
2667 srb->state |= dcb->active_srb->state;
2668 srb->state |= SRB_DATA_XFER;
2669 dcb->active_srb = srb;
2670
2671 return srb;
2672
2673 mingx0:
2674 srb = acb->tmp_srb;
2675 srb->state = SRB_UNEXPECT_RESEL;
2676 dcb->active_srb = srb;
2677 srb->msgout_buf[0] = MSG_ABORT_TAG;
2678 srb->msg_count = 1;
2679 DC395x_ENABLE_MSGOUT;
2680 dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag);
2681 return srb;
2682}
2683
2684
2685static inline void reprogram_regs(struct AdapterCtlBlk *acb,
2686 struct DeviceCtlBlk *dcb)
2687{
2688 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
2689 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
2690 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
2691 set_xfer_rate(acb, dcb);
2692}
2693
2694
2695
2696static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
2697{
2698 struct DeviceCtlBlk *dcb = srb->dcb;
2699 dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n",
2700 dcb->target_id, dcb->target_lun);
2701
2702 dcb->sync_mode &= ~(SYNC_NEGO_ENABLE);
2703 dcb->sync_mode |= SYNC_NEGO_DONE;
2704
2705 dcb->sync_offset = 0;
2706 dcb->min_nego_period = 200 >> 2;
2707 srb->state &= ~SRB_DO_SYNC_NEGO;
2708 reprogram_regs(acb, dcb);
2709 if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
2710 && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
2711 build_wdtr(acb, dcb, srb);
2712 DC395x_ENABLE_MSGOUT;
2713 dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n");
2714 }
2715}
2716
2717
2718
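/*
 * msgin_set_sync - evaluate an incoming SDTR: clamp the offered offset to
 * 15 and to our configured limit, round the period up to the next
 * supported clock divisor, reprogram the chip and, when the target
 * initiated the negotiation, echo the (possibly adjusted) SDTR back.
 */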
2719static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
2720{
2721 struct DeviceCtlBlk *dcb = srb->dcb;
2722 u8 bval;
2723 int fact;
2724 dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins "
2725 "(%02i.%01i MHz) Offset %i\n",
2726 dcb->target_id, srb->msgin_buf[3] << 2,
2727 (250 / srb->msgin_buf[3]),
2728 ((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3],
2729 srb->msgin_buf[4]);
2730
2731 if (srb->msgin_buf[4] > 15)
2732 srb->msgin_buf[4] = 15;
2733 if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO))
2734 dcb->sync_offset = 0;
2735 else if (dcb->sync_offset == 0)
2736 dcb->sync_offset = srb->msgin_buf[4];
2737 if (srb->msgin_buf[4] > dcb->sync_offset)
2738 srb->msgin_buf[4] = dcb->sync_offset;
2739 else
2740 dcb->sync_offset = srb->msgin_buf[4];
2741 bval = 0;
2742 while (bval < 7 && (srb->msgin_buf[3] > clock_period[bval]
2743 || dcb->min_nego_period >
2744 clock_period[bval]))
2745 bval++;
2746 if (srb->msgin_buf[3] < clock_period[bval])
2747 dprintkl(KERN_INFO,
2748 "msgin_set_sync: Increase sync nego period to %ins\n",
2749 clock_period[bval] << 2);
2750 srb->msgin_buf[3] = clock_period[bval];
2751 dcb->sync_period &= 0xf0;
2752 dcb->sync_period |= ALT_SYNC | bval;
2753 dcb->min_nego_period = srb->msgin_buf[3];
2754
2755 if (dcb->sync_period & WIDE_SYNC)
2756 fact = 500;
2757 else
2758 fact = 250;
2759
2760 dprintkl(KERN_INFO,
2761 "Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n",
2762 dcb->target_id, (fact == 500) ? "Wide16" : "",
2763 dcb->min_nego_period << 2, dcb->sync_offset,
2764 (fact / dcb->min_nego_period),
2765 ((fact % dcb->min_nego_period) * 10 +
2766 dcb->min_nego_period / 2) / dcb->min_nego_period);
2767
2768 if (!(srb->state & SRB_DO_SYNC_NEGO)) {
2769
2770 dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n",
2771 srb->msgin_buf[3] << 2, srb->msgin_buf[4]);
2772
2773 memcpy(srb->msgout_buf, srb->msgin_buf, 5);
2774 srb->msg_count = 5;
2775 DC395x_ENABLE_MSGOUT;
2776 dcb->sync_mode |= SYNC_NEGO_DONE;
2777 } else {
2778 if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
2779 && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
2780 build_wdtr(acb, dcb, srb);
2781 DC395x_ENABLE_MSGOUT;
2782 dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n");
2783 }
2784 }
2785 srb->state &= ~SRB_DO_SYNC_NEGO;
2786 dcb->sync_mode |= SYNC_NEGO_DONE | SYNC_NEGO_ENABLE;
2787
2788 reprogram_regs(acb, dcb);
2789}
2790
2791
2792static inline void msgin_set_nowide(struct AdapterCtlBlk *acb,
2793 struct ScsiReqBlk *srb)
2794{
2795 struct DeviceCtlBlk *dcb = srb->dcb;
2796 dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id);
2797
2798 dcb->sync_period &= ~WIDE_SYNC;
2799 dcb->sync_mode &= ~(WIDE_NEGO_ENABLE);
2800 dcb->sync_mode |= WIDE_NEGO_DONE;
2801 srb->state &= ~SRB_DO_WIDE_NEGO;
2802 reprogram_regs(acb, dcb);
2803 if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
2804 && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
2805 build_sdtr(acb, dcb, srb);
2806 DC395x_ENABLE_MSGOUT;
2807 dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n");
2808 }
2809}
2810
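/*
 * msgin_set_wide - evaluate an incoming WDTR: limit the width to what the
 * adapter and the per-target configuration allow, answer the message if
 * the target started the negotiation, update the WIDE_SYNC bit in
 * sync_period and optionally follow up with our own SDTR.
 */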
2811static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
2812{
2813 struct DeviceCtlBlk *dcb = srb->dcb;
2814 u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO
2815 && acb->config & HCC_WIDE_CARD) ? 1 : 0;
2816 dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id);
2817
2818 if (srb->msgin_buf[3] > wide)
2819 srb->msgin_buf[3] = wide;
2820
2821 if (!(srb->state & SRB_DO_WIDE_NEGO)) {
2822 dprintkl(KERN_DEBUG,
2823 "msgin_set_wide: Wide nego initiated <%02i>\n",
2824 dcb->target_id);
2825 memcpy(srb->msgout_buf, srb->msgin_buf, 4);
2826 srb->msg_count = 4;
2827 srb->state |= SRB_DO_WIDE_NEGO;
2828 DC395x_ENABLE_MSGOUT;
2829 }
2830
2831 dcb->sync_mode |= (WIDE_NEGO_ENABLE | WIDE_NEGO_DONE);
2832 if (srb->msgin_buf[3] > 0)
2833 dcb->sync_period |= WIDE_SYNC;
2834 else
2835 dcb->sync_period &= ~WIDE_SYNC;
2836 srb->state &= ~SRB_DO_WIDE_NEGO;
2837
2838 dprintkdbg(DBG_1,
2839 "msgin_set_wide: Wide (%i bit) negotiated <%02i>\n",
2840 (8 << srb->msgin_buf[3]), dcb->target_id);
2841 reprogram_regs(acb, dcb);
2842 if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
2843 && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
2844 build_sdtr(acb, dcb, srb);
2845 DC395x_ENABLE_MSGOUT;
2846 dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n");
2847 }
2848}
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
2860
2861
2862
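/*
 * msgin_phase0 - collect message-in bytes one interrupt at a time. Once
 * msgin_completed() reports a full message it is dispatched here:
 * DISCONNECT, the queue tag messages, MESSAGE REJECT (which terminates a
 * pending SDTR/WDTR negotiation), extended SDTR/WDTR, and a few messages
 * that are merely acknowledged. Anything unknown is rejected.
 */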
2863static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2864 u16 *pscsi_status)
2865{
2866 struct DeviceCtlBlk *dcb = acb->active_dcb;
2867 dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd);
2868
2869 srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2870 if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
2871
2872 switch (srb->msgin_buf[0]) {
2873 case DISCONNECT:
2874 srb->state = SRB_DISCONNECT;
2875 break;
2876
2877 case SIMPLE_QUEUE_TAG:
2878 case HEAD_OF_QUEUE_TAG:
2879 case ORDERED_QUEUE_TAG:
2880 srb =
2881 msgin_qtag(acb, dcb,
2882 srb->msgin_buf[1]);
2883 break;
2884
2885 case MESSAGE_REJECT:
2886 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
2887 DO_CLRATN | DO_DATALATCH);
2888
2889 if (srb->state & SRB_DO_SYNC_NEGO) {
2890 msgin_set_async(acb, srb);
2891 break;
2892 }
2893
2894 if (srb->state & SRB_DO_WIDE_NEGO) {
2895 msgin_set_nowide(acb, srb);
2896 break;
2897 }
2898 enable_msgout_abort(acb, srb);
2899
2900 break;
2901
2902 case EXTENDED_MESSAGE:
2903
2904 if (srb->msgin_buf[1] == 3
2905 && srb->msgin_buf[2] == EXTENDED_SDTR) {
2906 msgin_set_sync(acb, srb);
2907 break;
2908 }
2909
2910 if (srb->msgin_buf[1] == 2
2911 && srb->msgin_buf[2] == EXTENDED_WDTR
2912 && srb->msgin_buf[3] <= 2) {
2913 msgin_set_wide(acb, srb);
2914 break;
2915 }
2916 msgin_reject(acb, srb);
2917 break;
2918
2919 case MSG_IGNOREWIDE:
2920
2921 dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n");
2922 break;
2923
2924 case COMMAND_COMPLETE:
2925
2926 break;
2927
2928 case SAVE_POINTERS:
2929
2930
2931
2932
2933 dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
2934 "SAVE POINTER rem=%i Ignore\n",
2935 srb->cmd, srb->total_xfer_length);
2936 break;
2937
2938 case RESTORE_POINTERS:
2939 dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. Ignore\n");
2940 break;
2941
2942 case ABORT:
2943 dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
2944 "<%02i-%i> ABORT msg\n",
2945 srb->cmd, dcb->target_id,
2946 dcb->target_lun);
2947 dcb->flag |= ABORT_DEV_;
2948 enable_msgout_abort(acb, srb);
2949 break;
2950
2951 default:
2952
2953 if (srb->msgin_buf[0] & IDENTIFY_BASE) {
2954 dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n");
2955 srb->msg_count = 1;
2956 srb->msgout_buf[0] = dcb->identify_msg;
2957 DC395x_ENABLE_MSGOUT;
2958 srb->state |= SRB_MSGOUT;
2959
2960 }
2961 msgin_reject(acb, srb);
2962 }
2963
2964
2965 srb->state &= ~SRB_MSGIN;
2966 acb->msg_len = 0;
2967 }
2968 *pscsi_status = PH_BUS_FREE;
2969 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2970 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
2971}
2972
2973
2974static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2975 u16 *pscsi_status)
2976{
2977 dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd);
2978 clear_fifo(acb, "msgin_phase1");
2979 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
2980 if (!(srb->state & SRB_MSGIN)) {
2981 srb->state &= ~SRB_DISCONNECT;
2982 srb->state |= SRB_MSGIN;
2983 }
2984 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2985
2986 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_IN);
2987}
2988
2989
2990static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2991 u16 *pscsi_status)
2992{
2993}
2994
2995
2996static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2997 u16 *pscsi_status)
2998{
2999}
3000
3001
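/*
 * set_xfer_rate - copy the negotiated transfer parameters to every DCB
 * that shares the same target ID, since negotiation is per target and
 * the other LUNs inherit the result. Only LUN 0 (identify_msg LUN bits
 * zero) performs the update; during a device scan the offset is only
 * recorded in current_sync_offset.
 */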
3002static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb)
3003{
3004 struct DeviceCtlBlk *i;
3005
3006
3007 if (dcb->identify_msg & 0x07)
3008 return;
3009
3010 if (acb->scan_devices) {
3011 current_sync_offset = dcb->sync_offset;
3012 return;
3013 }
3014
3015 list_for_each_entry(i, &acb->dcb_list, list)
3016 if (i->target_id == dcb->target_id) {
3017 i->sync_period = dcb->sync_period;
3018 i->sync_offset = dcb->sync_offset;
3019 i->sync_mode = dcb->sync_mode;
3020 i->min_nego_period = dcb->min_nego_period;
3021 }
3022}
3023
3024
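/*
 * disconnect - the target has released the bus. Depending on the SRB
 * state this is a normal disconnect, a completed command, an abort that
 * was acknowledged, or a selection timeout / unexpected disconnect, in
 * which case the request is either retried or failed.
 */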
3025static void disconnect(struct AdapterCtlBlk *acb)
3026{
3027 struct DeviceCtlBlk *dcb = acb->active_dcb;
3028 struct ScsiReqBlk *srb;
3029
3030 if (!dcb) {
3031 dprintkl(KERN_ERR, "disconnect: No such device\n");
3032 udelay(500);
3033
3034 acb->last_reset =
3035 jiffies + HZ / 2 +
3036 HZ * acb->eeprom.delay_time;
3037 clear_fifo(acb, "disconnectEx");
3038 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
3039 return;
3040 }
3041 srb = dcb->active_srb;
3042 acb->active_dcb = NULL;
3043 dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd);
3044
3045 srb->scsi_phase = PH_BUS_FREE;
3046 clear_fifo(acb, "disconnect");
3047 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
3048 if (srb->state & SRB_UNEXPECT_RESEL) {
3049 dprintkl(KERN_ERR,
3050 "disconnect: Unexpected reselection <%02i-%i>\n",
3051 dcb->target_id, dcb->target_lun);
3052 srb->state = 0;
3053 waiting_process_next(acb);
3054 } else if (srb->state & SRB_ABORT_SENT) {
3055 dcb->flag &= ~ABORT_DEV_;
3056 acb->last_reset = jiffies + HZ / 2 + 1;
3057 dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
3058 doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
3059 waiting_process_next(acb);
3060 } else {
3061 if ((srb->state & (SRB_START_ | SRB_MSGOUT))
3062 || !(srb->state &
3063 (SRB_DISCONNECT | SRB_COMPLETED))) {
3064
3065
3066
3067
3068
3069 if (srb->state != SRB_START_
3070 && srb->state != SRB_MSGOUT) {
3071 srb->state = SRB_READY;
3072 dprintkl(KERN_DEBUG,
3073 "disconnect: (0x%p) Unexpected\n",
3074 srb->cmd);
3075 srb->target_status = SCSI_STAT_SEL_TIMEOUT;
3076 goto disc1;
3077 } else {
3078
3079 dprintkdbg(DBG_KG, "disconnect: (0x%p) "
3080 "<%02i-%i> SelTO\n", srb->cmd,
3081 dcb->target_id, dcb->target_lun);
3082 if (srb->retry_count++ > DC395x_MAX_RETRIES
3083 || acb->scan_devices) {
3084 srb->target_status =
3085 SCSI_STAT_SEL_TIMEOUT;
3086 goto disc1;
3087 }
3088 free_tag(dcb, srb);
3089 srb_going_to_waiting_move(dcb, srb);
3090 dprintkdbg(DBG_KG,
3091 "disconnect: (0x%p) Retry\n",
3092 srb->cmd);
3093 waiting_set_timer(acb, HZ / 20);
3094 }
3095 } else if (srb->state & SRB_DISCONNECT) {
3096 u8 bval = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
3097
3098
3099
3100 if (bval & 0x40) {
3101 dprintkdbg(DBG_0, "disconnect: SCSI bus stat "
3102 " 0x%02x: ACK set! Other controllers?\n",
3103 bval);
3104
3105 } else
3106 waiting_process_next(acb);
3107 } else if (srb->state & SRB_COMPLETED) {
3108 disc1:
3109
3110
3111
3112 free_tag(dcb, srb);
3113 dcb->active_srb = NULL;
3114 srb->state = SRB_FREE;
3115 srb_done(acb, dcb, srb);
3116 }
3117 }
3118}
3119
3120
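/*
 * reselect - a target has reselected the adapter. Any command we lost
 * arbitration for is re-queued, the DCB for the reselecting target/LUN
 * is looked up and the chip is reprogrammed with that device's sync
 * settings. For tagged devices the real SRB is only identified later,
 * when the queue tag message arrives, so the temporary SRB is used until
 * then.
 */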
3121static void reselect(struct AdapterCtlBlk *acb)
3122{
3123 struct DeviceCtlBlk *dcb = acb->active_dcb;
3124 struct ScsiReqBlk *srb = NULL;
3125 u16 rsel_tar_lun_id;
3126 u8 id, lun;
3127 u8 arblostflag = 0;
3128 dprintkdbg(DBG_0, "reselect: acb=%p\n", acb);
3129
3130 clear_fifo(acb, "reselect");
3131
3132
3133 rsel_tar_lun_id = DC395x_read16(acb, TRM_S1040_SCSI_TARGETID);
3134 if (dcb) {
3135 srb = dcb->active_srb;
3136 if (!srb) {
3137 dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, "
3138 "but active_srb == NULL\n");
3139 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
3140 return;
3141 }
3142
3143 if (!acb->scan_devices) {
3144 dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> "
3145 "Arb lost but Resel win rsel=%i stat=0x%04x\n",
3146 srb->cmd, dcb->target_id,
3147 dcb->target_lun, rsel_tar_lun_id,
3148 DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
3149 arblostflag = 1;
3150
3151
3152 srb->state = SRB_READY;
3153 free_tag(dcb, srb);
3154 srb_going_to_waiting_move(dcb, srb);
3155 waiting_set_timer(acb, HZ / 20);
3156
3157
3158 }
3159 }
3160
3161 if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8)))
3162 dprintkl(KERN_DEBUG, "reselect: Expects identify msg. "
3163 "Got %i!\n", rsel_tar_lun_id);
3164 id = rsel_tar_lun_id & 0xff;
3165 lun = (rsel_tar_lun_id >> 8) & 7;
3166 dcb = find_dcb(acb, id, lun);
3167 if (!dcb) {
3168 dprintkl(KERN_ERR, "reselect: From non existent device "
3169 "<%02i-%i>\n", id, lun);
3170 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
3171 return;
3172 }
3173 acb->active_dcb = dcb;
3174
3175 if (!(dcb->dev_mode & NTC_DO_DISCONNECT))
3176 dprintkl(KERN_DEBUG, "reselect: in spite of forbidden "
3177 "disconnection? <%02i-%i>\n",
3178 dcb->target_id, dcb->target_lun);
3179
3180 if (dcb->sync_mode & EN_TAG_QUEUEING) {
3181 srb = acb->tmp_srb;
3182 dcb->active_srb = srb;
3183 } else {
3184
3185 srb = dcb->active_srb;
3186 if (!srb || !(srb->state & SRB_DISCONNECT)) {
3187
3188
3189
3190 dprintkl(KERN_DEBUG,
3191 "reselect: w/o disconnected cmds <%02i-%i>\n",
3192 dcb->target_id, dcb->target_lun);
3193 srb = acb->tmp_srb;
3194 srb->state = SRB_UNEXPECT_RESEL;
3195 dcb->active_srb = srb;
3196 enable_msgout_abort(acb, srb);
3197 } else {
3198 if (dcb->flag & ABORT_DEV_) {
3199
3200 enable_msgout_abort(acb, srb);
3201 } else
3202 srb->state = SRB_DATA_XFER;
3203
3204 }
3205 }
3206 srb->scsi_phase = PH_BUS_FREE;
3207
3208
3209 dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id);
3210 DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
3211 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
3212 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
3213 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
3214 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
3215
3216 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
3217}
3218
3219
3220static inline u8 tagq_blacklist(char *name)
3221{
3222#ifndef DC395x_NO_TAGQ
3223#if 0
3224 u8 i;
3225 for (i = 0; i < BADDEVCNT; i++)
3226 if (memcmp(name, DC395x_baddevname1[i], 28) == 0)
3227 return 1;
3228#endif
3229 return 0;
3230#else
3231 return 1;
3232#endif
3233}
3234
3235
3236static void disc_tagq_set(struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr)
3237{
3238
3239 if ((ptr->Vers & 0x07) >= 2 || (ptr->RDF & 0x0F) == 2) {
3240 if ((ptr->Flags & SCSI_INQ_CMDQUEUE)
3241 && (dcb->dev_mode & NTC_DO_TAG_QUEUEING) &&
3242
3243
3244
3245 !tagq_blacklist(((char *)ptr) + 8)) {
3246 if (dcb->max_command == 1)
3247 dcb->max_command =
3248 dcb->acb->tag_max_num;
3249 dcb->sync_mode |= EN_TAG_QUEUEING;
3250
3251 } else
3252 dcb->max_command = 1;
3253 }
3254}
3255
3256
3257static void add_dev(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3258 struct ScsiInqData *ptr)
3259{
3260 u8 bval1 = ptr->DevType & SCSI_DEVTYPE;
3261 dcb->dev_type = bval1;
3262
3263 disc_tagq_set(dcb, ptr);
3264}
3265
3266
3267
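/*
 * pci_unmap_srb - undo the DMA mappings set up when the command was
 * started: the scatter/gather list itself and, via scsi_dma_unmap(), the
 * data buffers.
 */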
3268static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
3269{
3270 struct scsi_cmnd *cmd = srb->cmd;
3271 enum dma_data_direction dir = cmd->sc_data_direction;
3272
3273 if (scsi_sg_count(cmd) && dir != PCI_DMA_NONE) {
3274
3275 dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
3276 srb->sg_bus_addr, SEGMENTX_LEN);
3277 pci_unmap_single(acb->dev, srb->sg_bus_addr,
3278 SEGMENTX_LEN,
3279 PCI_DMA_TODEVICE);
3280 dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
3281 scsi_sg_count(cmd), scsi_bufflen(cmd));
3282
3283 scsi_dma_unmap(cmd);
3284 }
3285}
3286
3287
3288
3289static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
3290 struct ScsiReqBlk *srb)
3291{
3292 if (!(srb->flag & AUTO_REQSENSE))
3293 return;
3294
3295 dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
3296 srb->segment_x[0].address);
3297 pci_unmap_single(acb->dev, srb->segment_x[0].address,
3298 srb->segment_x[0].length, PCI_DMA_FROMDEVICE);
3299
3300 srb->total_xfer_length = srb->xferred;
3301 srb->segment_x[0].address =
3302 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address;
3303 srb->segment_x[0].length =
3304 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length;
3305}
3306
3307
3308
3309
3310
3311
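/*
 * srb_done - complete a request: evaluate the target and adapter status
 * (issuing an automatic REQUEST SENSE on CHECK CONDITION and shrinking
 * the queue depth on QUEUE FULL), snoop INQUIRY data to decide on tagged
 * queueing, set the residual count and hand the command back to the
 * midlayer.
 */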
3312static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3313 struct ScsiReqBlk *srb)
3314{
3315 u8 tempcnt, status;
3316 struct scsi_cmnd *cmd = srb->cmd;
3317 enum dma_data_direction dir = cmd->sc_data_direction;
3318 int ckc_only = 1;
3319
3320 dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd,
3321 srb->cmd->device->id, (u8)srb->cmd->device->lun);
3322 dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
3323 srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
3324 scsi_sglist(cmd));
3325 status = srb->target_status;
3326 if (srb->flag & AUTO_REQSENSE) {
3327 dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
3328 pci_unmap_srb_sense(acb, srb);
3329
3330
3331
3332 srb->flag &= ~AUTO_REQSENSE;
3333 srb->adapter_status = 0;
3334 srb->target_status = CHECK_CONDITION << 1;
3335 if (debug_enabled(DBG_1)) {
3336 switch (cmd->sense_buffer[2] & 0x0f) {
3337 case NOT_READY:
3338 dprintkl(KERN_DEBUG,
3339 "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3340 cmd->cmnd[0], dcb->target_id,
3341 dcb->target_lun, status, acb->scan_devices);
3342 break;
3343 case UNIT_ATTENTION:
3344 dprintkl(KERN_DEBUG,
3345 "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3346 cmd->cmnd[0], dcb->target_id,
3347 dcb->target_lun, status, acb->scan_devices);
3348 break;
3349 case ILLEGAL_REQUEST:
3350 dprintkl(KERN_DEBUG,
3351 "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3352 cmd->cmnd[0], dcb->target_id,
3353 dcb->target_lun, status, acb->scan_devices);
3354 break;
3355 case MEDIUM_ERROR:
3356 dprintkl(KERN_DEBUG,
3357 "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3358 cmd->cmnd[0], dcb->target_id,
3359 dcb->target_lun, status, acb->scan_devices);
3360 break;
3361 case HARDWARE_ERROR:
3362 dprintkl(KERN_DEBUG,
3363 "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3364 cmd->cmnd[0], dcb->target_id,
3365 dcb->target_lun, status, acb->scan_devices);
3366 break;
3367 }
3368 if (cmd->sense_buffer[7] >= 6)
3369 printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x "
3370 "(0x%08x 0x%08x)\n",
3371 cmd->sense_buffer[2], cmd->sense_buffer[12],
3372 cmd->sense_buffer[13],
3373 *((unsigned int *)(cmd->sense_buffer + 3)),
3374 *((unsigned int *)(cmd->sense_buffer + 8)));
3375 else
3376 printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n",
3377 cmd->sense_buffer[2],
3378 *((unsigned int *)(cmd->sense_buffer + 3)));
3379 }
3380
3381 if (status == (CHECK_CONDITION << 1)) {
3382 cmd->result = DID_BAD_TARGET << 16;
3383 goto ckc_e;
3384 }
3385 dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");
3386
3387 if (srb->total_xfer_length
3388 && srb->total_xfer_length >= cmd->underflow)
3389 cmd->result =
3390 MK_RES_LNX(DRIVER_SENSE, DID_OK,
3391 srb->end_message, CHECK_CONDITION);
3392
3393 else
3394 cmd->result =
3395 MK_RES_LNX(DRIVER_SENSE, DID_OK,
3396 srb->end_message, CHECK_CONDITION);
3397
3398 goto ckc_e;
3399 }
3400
3401
3402 if (status) {
3403
3404
3405
3406 if (status_byte(status) == CHECK_CONDITION) {
3407 request_sense(acb, dcb, srb);
3408 return;
3409 } else if (status_byte(status) == QUEUE_FULL) {
3410 tempcnt = (u8)list_size(&dcb->srb_going_list);
3411 dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
3412 dcb->target_id, dcb->target_lun, tempcnt);
3413 if (tempcnt > 1)
3414 tempcnt--;
3415 dcb->max_command = tempcnt;
3416 free_tag(dcb, srb);
3417 srb_going_to_waiting_move(dcb, srb);
3418 waiting_set_timer(acb, HZ / 20);
3419 srb->adapter_status = 0;
3420 srb->target_status = 0;
3421 return;
3422 } else if (status == SCSI_STAT_SEL_TIMEOUT) {
3423 srb->adapter_status = H_SEL_TIMEOUT;
3424 srb->target_status = 0;
3425 cmd->result = DID_NO_CONNECT << 16;
3426 } else {
3427 srb->adapter_status = 0;
3428 SET_RES_DID(cmd->result, DID_ERROR);
3429 SET_RES_MSG(cmd->result, srb->end_message);
3430 SET_RES_TARGET(cmd->result, status);
3431
3432 }
3433 } else {
3434
3435
3436
3437 status = srb->adapter_status;
3438 if (status & H_OVER_UNDER_RUN) {
3439 srb->target_status = 0;
3440 SET_RES_DID(cmd->result, DID_OK);
3441 SET_RES_MSG(cmd->result, srb->end_message);
3442 } else if (srb->status & PARITY_ERROR) {
3443 SET_RES_DID(cmd->result, DID_PARITY);
3444 SET_RES_MSG(cmd->result, srb->end_message);
3445 } else {
3446
3447 srb->adapter_status = 0;
3448 srb->target_status = 0;
3449 SET_RES_DID(cmd->result, DID_OK);
3450 }
3451 }
3452
3453 if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
3454 pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
3455 scsi_sg_count(cmd), dir);
3456
3457 ckc_only = 0;
3458
3459 ckc_e:
3460
3461 if (cmd->cmnd[0] == INQUIRY) {
3462 unsigned char *base = NULL;
3463 struct ScsiInqData *ptr;
3464 unsigned long flags = 0;
3465 struct scatterlist* sg = scsi_sglist(cmd);
3466 size_t offset = 0, len = sizeof(struct ScsiInqData);
3467
3468 local_irq_save(flags);
3469 base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len);
3470 ptr = (struct ScsiInqData *)(base + offset);
3471
3472 if (!ckc_only && (cmd->result & RES_DID) == 0
3473 && cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
3474 && dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
3475 dcb->inquiry7 = ptr->Flags;
3476
3477
3478
3479 if ((cmd->result == (DID_OK << 16)
3480 || status_byte(cmd->result) &
3481 CHECK_CONDITION)) {
3482 if (!dcb->init_tcq_flag) {
3483 add_dev(acb, dcb, ptr);
3484 dcb->init_tcq_flag = 1;
3485 }
3486 }
3487
3488 scsi_kunmap_atomic_sg(base);
3489 local_irq_restore(flags);
3490 }
3491
3492
3493 scsi_set_resid(cmd, srb->total_xfer_length);
3494
3495 cmd->SCp.this_residual = srb->total_xfer_length;
3496 cmd->SCp.buffers_residual = 0;
3497 if (debug_enabled(DBG_KG)) {
3498 if (srb->total_xfer_length)
3499 dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
3500 "cmnd=0x%02x Missed %i bytes\n",
3501 cmd, cmd->device->id, (u8)cmd->device->lun,
3502 cmd->cmnd[0], srb->total_xfer_length);
3503 }
3504
3505 srb_going_remove(dcb, srb);
3506
3507 if (srb == acb->tmp_srb)
3508 dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
3509 else {
3510 dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
3511 cmd, cmd->result);
3512 srb_free_insert(acb, srb);
3513 }
3514 pci_unmap_srb(acb, srb);
3515
3516 cmd->scsi_done(cmd);
3517 waiting_process_next(acb);
3518}
3519
3520
3521
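/*
 * doing_srb_done - fail every request on the going and waiting lists of
 * all devices with the given DID_ code (used for bus reset and abort).
 * With 'force' set the commands are completed back to the midlayer
 * immediately.
 */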
3522static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
3523 struct scsi_cmnd *cmd, u8 force)
3524{
3525 struct DeviceCtlBlk *dcb;
3526 dprintkl(KERN_INFO, "doing_srb_done: pids ");
3527
3528 list_for_each_entry(dcb, &acb->dcb_list, list) {
3529 struct ScsiReqBlk *srb;
3530 struct ScsiReqBlk *tmp;
3531 struct scsi_cmnd *p;
3532
3533 list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
3534 enum dma_data_direction dir;
3535 int result;
3536
3537 p = srb->cmd;
3538 dir = p->sc_data_direction;
3539 result = MK_RES(0, did_flag, 0, 0);
3540 printk("G:%p(%02i-%i) ", p,
3541 p->device->id, (u8)p->device->lun);
3542 srb_going_remove(dcb, srb);
3543 free_tag(dcb, srb);
3544 srb_free_insert(acb, srb);
3545 p->result = result;
3546 pci_unmap_srb_sense(acb, srb);
3547 pci_unmap_srb(acb, srb);
3548 if (force) {
3549
3550
3551 p->scsi_done(p);
3552 }
3553 }
3554 if (!list_empty(&dcb->srb_going_list))
3555 dprintkl(KERN_DEBUG,
3556 "How could the ML send cmnds to the Going queue? <%02i-%i>\n",
3557 dcb->target_id, dcb->target_lun);
3558 if (dcb->tag_mask)
3559 dprintkl(KERN_DEBUG,
3560 "tag_mask for <%02i-%i> should be empty, is %08x!\n",
3561 dcb->target_id, dcb->target_lun,
3562 dcb->tag_mask);
3563
3564
3565 list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
3566 int result;
3567 p = srb->cmd;
3568
3569 result = MK_RES(0, did_flag, 0, 0);
3570 printk("W:%p<%02i-%i>", p, p->device->id,
3571 (u8)p->device->lun);
3572 srb_waiting_remove(dcb, srb);
3573 srb_free_insert(acb, srb);
3574 p->result = result;
3575 pci_unmap_srb_sense(acb, srb);
3576 pci_unmap_srb(acb, srb);
3577 if (force) {
3578
3579 /* complete the waiting command itself; the 'cmd' argument may be NULL here */
3580 p->scsi_done(p);
3581 }
3582 }
3583 if (!list_empty(&dcb->srb_waiting_list))
3584 dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n",
3585 list_size(&dcb->srb_waiting_list), dcb->target_id,
3586 dcb->target_lun);
3587 dcb->flag &= ~ABORT_DEV_;
3588 }
3589 printk("\n");
3590}
3591
3592
3593static void reset_scsi_bus(struct AdapterCtlBlk *acb)
3594{
3595 dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb);
3596 acb->acb_flag |= RESET_DEV;
3597 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
3598
3599 while (!(DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET))
3600 ;
3601}
3602
3603
3604static void set_basic_config(struct AdapterCtlBlk *acb)
3605{
3606 u8 bval;
3607 u16 wval;
3608 DC395x_write8(acb, TRM_S1040_SCSI_TIMEOUT, acb->sel_timeout);
3609 if (acb->config & HCC_PARITY)
3610 bval = PHASELATCH | INITIATOR | BLOCKRST | PARITYCHECK;
3611 else
3612 bval = PHASELATCH | INITIATOR | BLOCKRST;
3613
3614 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG0, bval);
3615
3616
3617 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG1, 0x03);
3618
3619 DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
3620
3621 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, 0x00);
3622
3623 wval = DC395x_read16(acb, TRM_S1040_GEN_CONTROL) & 0x7F;
3624 DC395x_write16(acb, TRM_S1040_GEN_CONTROL, wval);
3625
3626 wval = DC395x_read16(acb, TRM_S1040_DMA_CONFIG) & ~DMA_FIFO_CTRL;
3627 wval |=
3628 DMA_FIFO_HALF_HALF | DMA_ENHANCE ;
3629 DC395x_write16(acb, TRM_S1040_DMA_CONFIG, wval);
3630
3631 DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
3632
3633 DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x7F);
3634 DC395x_write8(acb, TRM_S1040_DMA_INTEN, EN_SCSIINTR | EN_DMAXFERERROR
3635
3636 );
3637}
3638
3639
3640static void scsi_reset_detect(struct AdapterCtlBlk *acb)
3641{
3642 dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb);
3643
3644 if (timer_pending(&acb->waiting_timer))
3645 del_timer(&acb->waiting_timer);
3646
3647 DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
3648 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
3649
3650 udelay(500);
3651
3652 acb->last_reset =
3653 jiffies + 5 * HZ / 2 +
3654 HZ * acb->eeprom.delay_time;
3655
3656 clear_fifo(acb, "scsi_reset_detect");
3657 set_basic_config(acb);
3658
3659
3660
3661 if (acb->acb_flag & RESET_DEV) {
3662 acb->acb_flag |= RESET_DONE;
3663 } else {
3664 acb->acb_flag |= RESET_DETECT;
3665 reset_dev_param(acb);
3666 doing_srb_done(acb, DID_RESET, NULL, 1);
3667
3668 acb->active_dcb = NULL;
3669 acb->acb_flag = 0;
3670 waiting_process_next(acb);
3671 }
3672}
3673
3674
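/*
 * request_sense - issue an automatic REQUEST SENSE for a command that
 * returned CHECK CONDITION. The first SG entry is temporarily repurposed
 * to map the sense buffer; the original entry is parked in the last SG
 * slot and restored later by pci_unmap_srb_sense().
 */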
3675static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3676 struct ScsiReqBlk *srb)
3677{
3678 struct scsi_cmnd *cmd = srb->cmd;
3679 dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n",
3680 cmd, cmd->device->id, (u8)cmd->device->lun);
3681
3682 srb->flag |= AUTO_REQSENSE;
3683 srb->adapter_status = 0;
3684 srb->target_status = 0;
3685
3686
3687 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3688
3689
3690 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
3691 srb->segment_x[0].address;
3692 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length =
3693 srb->segment_x[0].length;
3694 srb->xferred = srb->total_xfer_length;
3695
3696 srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
3697 srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
3698
3699 srb->segment_x[0].address =
3700 pci_map_single(acb->dev, cmd->sense_buffer,
3701 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
3702 dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
3703 cmd->sense_buffer, srb->segment_x[0].address,
3704 SCSI_SENSE_BUFFERSIZE);
3705 srb->sg_count = 1;
3706 srb->sg_index = 0;
3707
3708 if (start_scsi(acb, dcb, srb)) {
3709 dprintkl(KERN_DEBUG,
3710 "request_sense: (0x%p) failed <%02i-%i>\n",
3711 srb->cmd, dcb->target_id, dcb->target_lun);
3712 srb_going_to_waiting_move(dcb, srb);
3713 waiting_set_timer(acb, HZ / 100);
3714 }
3715}
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
3727
3728
3729
3730
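/*
 * device_alloc - allocate and initialise a DeviceCtlBlk for the given
 * target/LUN from the EEPROM settings. For LUNs other than 0 the
 * negotiated parameters are copied from an existing DCB of the same
 * target. Returns NULL if the allocation fails.
 */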
3731static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
3732 u8 target, u8 lun)
3733{
3734 struct NvRamType *eeprom = &acb->eeprom;
3735 u8 period_index = eeprom->target[target].period & 0x07;
3736 struct DeviceCtlBlk *dcb;
3737
3738 dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC);
3739 dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun);
3740 if (!dcb)
3741 return NULL;
3742 dcb->acb = NULL;
3743 INIT_LIST_HEAD(&dcb->srb_going_list);
3744 INIT_LIST_HEAD(&dcb->srb_waiting_list);
3745 dcb->active_srb = NULL;
3746 dcb->tag_mask = 0;
3747 dcb->max_command = 1;
3748 dcb->target_id = target;
3749 dcb->target_lun = lun;
3750 dcb->dev_mode = eeprom->target[target].cfg0;
3751#ifndef DC395x_NO_DISCONNECT
3752 dcb->identify_msg =
3753 IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
3754#else
3755 dcb->identify_msg = IDENTIFY(0, lun);
3756#endif
3757 dcb->inquiry7 = 0;
3758 dcb->sync_mode = 0;
3759 dcb->min_nego_period = clock_period[period_index];
3760 dcb->sync_period = 0;
3761 dcb->sync_offset = 0;
3762 dcb->flag = 0;
3763
3764#ifndef DC395x_NO_WIDE
3765 if ((dcb->dev_mode & NTC_DO_WIDE_NEGO)
3766 && (acb->config & HCC_WIDE_CARD))
3767 dcb->sync_mode |= WIDE_NEGO_ENABLE;
3768#endif
3769#ifndef DC395x_NO_SYNC
3770 if (dcb->dev_mode & NTC_DO_SYNC_NEGO)
3771 if (!(lun) || current_sync_offset)
3772 dcb->sync_mode |= SYNC_NEGO_ENABLE;
3773#endif
3774 if (dcb->target_lun != 0) {
3775
3776 struct DeviceCtlBlk *p;
3777 list_for_each_entry(p, &acb->dcb_list, list)
3778 if (p->target_id == dcb->target_id)
3779 break;
3780 dprintkdbg(DBG_1,
3781 "device_alloc: <%02i-%i> copy from <%02i-%i>\n",
3782 dcb->target_id, dcb->target_lun,
3783 p->target_id, p->target_lun);
3784 dcb->sync_mode = p->sync_mode;
3785 dcb->sync_period = p->sync_period;
3786 dcb->min_nego_period = p->min_nego_period;
3787 dcb->sync_offset = p->sync_offset;
3788 dcb->inquiry7 = p->inquiry7;
3789 }
3790 return dcb;
3791}
3792
3793
3794
3795
3796
3797
3798
3799
3800static void adapter_add_device(struct AdapterCtlBlk *acb,
3801 struct DeviceCtlBlk *dcb)
3802{
3803
3804 dcb->acb = acb;
3805
3806
3807 if (list_empty(&acb->dcb_list))
3808 acb->dcb_run_robin = dcb;
3809
3810
3811 list_add_tail(&dcb->list, &acb->dcb_list);
3812
3813
3814 acb->dcb_map[dcb->target_id] |= (1 << dcb->target_lun);
3815 acb->children[dcb->target_id][dcb->target_lun] = dcb;
3816}
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828static void adapter_remove_device(struct AdapterCtlBlk *acb,
3829 struct DeviceCtlBlk *dcb)
3830{
3831 struct DeviceCtlBlk *i;
3832 struct DeviceCtlBlk *tmp;
3833 dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n",
3834 dcb->target_id, dcb->target_lun);
3835
3836
3837 if (acb->active_dcb == dcb)
3838 acb->active_dcb = NULL;
3839 if (acb->dcb_run_robin == dcb)
3840 acb->dcb_run_robin = dcb_get_next(&acb->dcb_list, dcb);
3841
3842
3843 list_for_each_entry_safe(i, tmp, &acb->dcb_list, list)
3844 if (dcb == i) {
3845 list_del(&i->list);
3846 break;
3847 }
3848
3849
3850 acb->dcb_map[dcb->target_id] &= ~(1 << dcb->target_lun);
3851 acb->children[dcb->target_id][dcb->target_lun] = NULL;
3852 dcb->acb = NULL;
3853}
3854
3855
3856
3857
3858
3859
3860
3861
3862
3863static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb,
3864 struct DeviceCtlBlk *dcb)
3865{
3866 if (list_size(&dcb->srb_going_list) > 1) {
3867 dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> "
3868 "Won't remove because of %i active requests.\n",
3869 dcb->target_id, dcb->target_lun,
3870 list_size(&dcb->srb_going_list));
3871 return;
3872 }
3873 adapter_remove_device(acb, dcb);
3874 kfree(dcb);
3875}
3876
3877
3878
3879
3880
3881
3882
3883
3884static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
3885{
3886 struct DeviceCtlBlk *dcb;
3887 struct DeviceCtlBlk *tmp;
3888 dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n",
3889 list_size(&acb->dcb_list));
3890
3891 list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list)
3892 adapter_remove_and_free_device(acb, dcb);
3893}
3894
3895
3896
3897
3898
3899
3900
3901
3902
3903static int dc395x_slave_alloc(struct scsi_device *scsi_device)
3904{
3905 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
3906 struct DeviceCtlBlk *dcb;
3907
3908 dcb = device_alloc(acb, scsi_device->id, scsi_device->lun);
3909 if (!dcb)
3910 return -ENOMEM;
3911 adapter_add_device(acb, dcb);
3912
3913 return 0;
3914}
3915
3916
3917
3918
3919
3920
3921
3922
3923static void dc395x_slave_destroy(struct scsi_device *scsi_device)
3924{
3925 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
3926 struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
3927 if (dcb)
3928 adapter_remove_and_free_device(acb, dcb);
3929}
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941static void trms1040_wait_30us(unsigned long io_port)
3942{
3943
3944 outb(5, io_port + TRM_S1040_GEN_TIMER);
3945 while (!(inb(io_port + TRM_S1040_GEN_STATUS) & GTIMEOUT))
3946 ;
3947}
3948
3949
3950
3951
3952
3953
3954
3955
3956
3957
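/*
 * trms1040_write_cmd - clock a 3-bit opcode and a 7-bit address out to
 * the serial EEPROM, most significant bit first, one bit per NVR_CLOCK
 * pulse.
 */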
3958static void trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
3959{
3960 int i;
3961 u8 send_data;
3962
3963
3964 for (i = 0; i < 3; i++, cmd <<= 1) {
3965 send_data = NVR_SELECT;
3966 if (cmd & 0x04)
3967 send_data |= NVR_BITOUT;
3968
3969 outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
3970 trms1040_wait_30us(io_port);
3971 outb((send_data | NVR_CLOCK),
3972 io_port + TRM_S1040_GEN_NVRAM);
3973 trms1040_wait_30us(io_port);
3974 }
3975
3976
3977 for (i = 0; i < 7; i++, addr <<= 1) {
3978 send_data = NVR_SELECT;
3979 if (addr & 0x40)
3980 send_data |= NVR_BITOUT;
3981
3982 outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
3983 trms1040_wait_30us(io_port);
3984 outb((send_data | NVR_CLOCK),
3985 io_port + TRM_S1040_GEN_NVRAM);
3986 trms1040_wait_30us(io_port);
3987 }
3988 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
3989 trms1040_wait_30us(io_port);
3990}
3991
3992
3993
3994
3995
3996
3997
3998
3999
4000
4001
4002
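/*
 * trms1040_set_data - write one byte to the serial EEPROM: issue the
 * write opcode, clock out the eight data bits MSB first, then poll the
 * chip until it reports completion on NVR_BITIN.
 */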
4003static void trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
4004{
4005 int i;
4006 u8 send_data;
4007
4008
4009 trms1040_write_cmd(io_port, 0x05, addr);
4010
4011
4012 for (i = 0; i < 8; i++, byte <<= 1) {
4013 send_data = NVR_SELECT;
4014 if (byte & 0x80)
4015 send_data |= NVR_BITOUT;
4016
4017 outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
4018 trms1040_wait_30us(io_port);
4019 outb((send_data | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
4020 trms1040_wait_30us(io_port);
4021 }
4022 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
4023 trms1040_wait_30us(io_port);
4024
4025
4026 outb(0, io_port + TRM_S1040_GEN_NVRAM);
4027 trms1040_wait_30us(io_port);
4028
4029 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
4030 trms1040_wait_30us(io_port);
4031
4032
4033 while (1) {
4034 outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
4035 trms1040_wait_30us(io_port);
4036
4037 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
4038 trms1040_wait_30us(io_port);
4039
4040 if (inb(io_port + TRM_S1040_GEN_NVRAM) & NVR_BITIN)
4041 break;
4042 }
4043
4044
4045 outb(0, io_port + TRM_S1040_GEN_NVRAM);
4046}
4047
4048
4049
4050
4051
4052
4053
4054
4055
4056
4057static void trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port)
4058{
4059 u8 *b_eeprom = (u8 *)eeprom;
4060 u8 addr;
4061
4062
4063 outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
4064 io_port + TRM_S1040_GEN_CONTROL);
4065
4066
4067 trms1040_write_cmd(io_port, 0x04, 0xFF);
4068 outb(0, io_port + TRM_S1040_GEN_NVRAM);
4069 trms1040_wait_30us(io_port);
4070
4071
4072 for (addr = 0; addr < 128; addr++, b_eeprom++)
4073 trms1040_set_data(io_port, addr, *b_eeprom);
4074
4075
4076 trms1040_write_cmd(io_port, 0x04, 0x00);
4077 outb(0, io_port + TRM_S1040_GEN_NVRAM);
4078 trms1040_wait_30us(io_port);
4079
4080
4081 outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
4082 io_port + TRM_S1040_GEN_CONTROL);
4083}
4084
4085
4086
4087
4088
4089
4090
4091
4092
4093
4094
4095
4096
4097static u8 trms1040_get_data(unsigned long io_port, u8 addr)
4098{
4099 int i;
4100 u8 read_byte;
4101 u8 result = 0;
4102
4103
4104 trms1040_write_cmd(io_port, 0x06, addr);
4105
4106
4107 for (i = 0; i < 8; i++) {
4108 outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
4109 trms1040_wait_30us(io_port);
4110 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
4111
4112
4113 read_byte = inb(io_port + TRM_S1040_GEN_NVRAM);
4114 result <<= 1;
4115 if (read_byte & NVR_BITIN)
4116 result |= 1;
4117
4118 trms1040_wait_30us(io_port);
4119 }
4120
4121
4122 outb(0, io_port + TRM_S1040_GEN_NVRAM);
4123 return result;
4124}
4125
4126
4127
4128
4129
4130
4131
4132
4133
4134
4135static void trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
4136{
4137 u8 *b_eeprom = (u8 *)eeprom;
4138 u8 addr;
4139
4140
4141 outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
4142 io_port + TRM_S1040_GEN_CONTROL);
4143
4144
4145 for (addr = 0; addr < 128; addr++, b_eeprom++)
4146 *b_eeprom = trms1040_get_data(io_port, addr);
4147
4148
4149 outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
4150 io_port + TRM_S1040_GEN_CONTROL);
4151}
4152
4153
4154
4155
4156
4157
4158
4159
4160
4161
4162
4163
4164
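/*
 * check_eeprom - read the whole EEPROM and verify that its 16-bit words
 * sum to 0x1234. On a checksum mismatch a default configuration is
 * generated and written back; otherwise the stored settings are used,
 * subject to the module parameter overrides.
 */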
4165static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
4166{
4167 u16 *w_eeprom = (u16 *)eeprom;
4168 u16 w_addr;
4169 u16 cksum;
4170 u32 d_addr;
4171 u32 *d_eeprom;
4172
4173 trms1040_read_all(eeprom, io_port);
4174
4175 cksum = 0;
4176 for (w_addr = 0, w_eeprom = (u16 *)eeprom; w_addr < 64;
4177 w_addr++, w_eeprom++)
4178 cksum += *w_eeprom;
4179 if (cksum != 0x1234) {
4180
4181
4182
4183
4184 dprintkl(KERN_WARNING,
4185 "EEProm checksum error: using default values and options.\n");
4186 eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
4187 eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
4188 eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
4189 eeprom->sub_sys_id[1] =
4190 (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
4191 eeprom->sub_class = 0x00;
4192 eeprom->vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
4193 eeprom->vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
4194 eeprom->device_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
4195 eeprom->device_id[1] =
4196 (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
4197 eeprom->reserved = 0x00;
4198
4199 for (d_addr = 0, d_eeprom = (u32 *)eeprom->target;
4200 d_addr < 16; d_addr++, d_eeprom++)
4201 *d_eeprom = 0x00000077;
4202
4203 *d_eeprom++ = 0x04000F07;
4204 *d_eeprom++ = 0x00000015;
4205 for (d_addr = 0; d_addr < 12; d_addr++, d_eeprom++)
4206 *d_eeprom = 0x00;
4207
4208
4209 set_safe_settings();
4210 fix_settings();
4211 eeprom_override(eeprom);
4212
4213 eeprom->cksum = 0x00;
4214 for (w_addr = 0, cksum = 0, w_eeprom = (u16 *)eeprom;
4215 w_addr < 63; w_addr++, w_eeprom++)
4216 cksum += *w_eeprom;
4217
4218 *w_eeprom = 0x1234 - cksum;
4219 trms1040_write_all(eeprom, io_port);
4220 eeprom->delay_time = cfg_data[CFG_RESET_DELAY].value;
4221 } else {
4222 set_safe_settings();
4223 eeprom_index_to_delay(eeprom);
4224 eeprom_override(eeprom);
4225 }
4226}
4227
4228
4229
4230
4231
4232
4233
4234
4235static void print_eeprom_settings(struct NvRamType *eeprom)
4236{
4237 dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n",
4238 eeprom->scsi_id,
4239 eeprom->target[0].period,
4240 clock_speed[eeprom->target[0].period] / 10,
4241 clock_speed[eeprom->target[0].period] % 10,
4242 eeprom->target[0].cfg0);
4243 dprintkl(KERN_INFO, " AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n",
4244 eeprom->channel_cfg, eeprom->max_tag,
4245 1 << eeprom->max_tag, eeprom->delay_time);
4246}
4247
4248
4249
4250static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
4251{
4252 int i;
4253 const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
4254
4255 for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
4256 kfree(acb->srb_array[i].segment_x);
4257}
4258
4259
4260
4261
4262
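/*
 * adapter_sg_tables_alloc - carve the per-SRB scatter/gather tables
 * (SEGMENTX_LEN bytes each) out of whole kernel pages, including one
 * table for the temporary SRB. Returns 0 on success, 1 if an allocation
 * fails.
 */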
4263static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
4264{
4265 const unsigned mem_needed = (DC395x_MAX_SRB_CNT+1)
4266 *SEGMENTX_LEN;
4267 int pages = (mem_needed+(PAGE_SIZE-1))/PAGE_SIZE;
4268 const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
4269 int srb_idx = 0;
4270 unsigned i = 0;
4271 struct SGentry *uninitialized_var(ptr);
4272
4273 for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
4274 acb->srb_array[i].segment_x = NULL;
4275
4276 dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
4277 while (pages--) {
4278 ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
4279 if (!ptr) {
4280 adapter_sg_tables_free(acb);
4281 return 1;
4282 }
4283 dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n",
4284 PAGE_SIZE, ptr, srb_idx);
4285 i = 0;
4286 while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT)
4287 acb->srb_array[srb_idx++].segment_x =
4288 ptr + (i++ * DC395x_MAX_SG_LISTENTRY);
4289 }
4290 if (i < srbs_per_page)
4291 acb->srb.segment_x =
4292 ptr + (i * DC395x_MAX_SG_LISTENTRY);
4293 else
4294 dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
4295 return 0;
4296}
4297
4298
4299
4300
4301
4302
4303
4304
4305
4306
4307
4308
4309static void adapter_print_config(struct AdapterCtlBlk *acb)
4310{
4311 u8 bval;
4312
4313 bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS);
4314 dprintkl(KERN_INFO, "%sConnectors: ",
4315 ((bval & WIDESCSI) ? "(Wide) " : ""));
4316 if (!(bval & CON5068))
4317 printk("ext%s ", !(bval & EXT68HIGH) ? "68" : "50");
4318 if (!(bval & CON68))
4319 printk("int68%s ", !(bval & INT68HIGH) ? "" : "(50)");
4320 if (!(bval & CON50))
4321 printk("int50 ");
4322 if ((bval & (CON5068 | CON50 | CON68)) ==
4323 0 )
4324 printk(" Oops! (All 3?) ");
4325 bval = DC395x_read8(acb, TRM_S1040_GEN_CONTROL);
4326 printk(" Termination: ");
4327 if (bval & DIS_TERM)
4328 printk("Disabled\n");
4329 else {
4330 if (bval & AUTOTERM)
4331 printk("Auto ");
4332 if (bval & LOW8TERM)
4333 printk("Low ");
4334 if (bval & UP8TERM)
4335 printk("High ");
4336 printk("\n");
4337 }
4338}
4339
4340
4341
4342
4343
4344
4345
4346
4347
4348
4349
4350
4351
4352
4353static void adapter_init_params(struct AdapterCtlBlk *acb)
4354{
4355 struct NvRamType *eeprom = &acb->eeprom;
4356 int i;
4357
4358
4359
4360
4361
4362 INIT_LIST_HEAD(&acb->dcb_list);
4363 acb->dcb_run_robin = NULL;
4364 acb->active_dcb = NULL;
4365
4366 INIT_LIST_HEAD(&acb->srb_free_list);
4367
4368 acb->tmp_srb = &acb->srb;
4369 init_timer(&acb->waiting_timer);
4370 init_timer(&acb->selto_timer);
4371
4372 acb->srb_count = DC395x_MAX_SRB_CNT;
4373
4374 acb->sel_timeout = DC395x_SEL_TIMEOUT;
4375
4376
4377 acb->tag_max_num = 1 << eeprom->max_tag;
4378 if (acb->tag_max_num > 30)
4379 acb->tag_max_num = 30;
4380
4381 acb->acb_flag = 0;
4382 acb->gmode2 = eeprom->channel_cfg;
4383 acb->config = 0;
4384
4385 if (eeprom->channel_cfg & NAC_SCANLUN)
4386 acb->lun_chk = 1;
4387 acb->scan_devices = 1;
4388
4389 acb->scsi_host->this_id = eeprom->scsi_id;
4390 acb->hostid_bit = (1 << acb->scsi_host->this_id);
4391
4392 for (i = 0; i < DC395x_MAX_SCSI_ID; i++)
4393 acb->dcb_map[i] = 0;
4394
4395 acb->msg_len = 0;
4396
4397
4398 for (i = 0; i < acb->srb_count - 1; i++)
4399 srb_free_insert(acb, &acb->srb_array[i]);
4400}
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
4411
4412
4413
4414
4415static void adapter_init_scsi_host(struct Scsi_Host *host)
4416{
4417 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
4418 struct NvRamType *eeprom = &acb->eeprom;
4419
4420 host->max_cmd_len = 24;
4421 host->can_queue = DC395x_MAX_CMD_QUEUE;
4422 host->cmd_per_lun = DC395x_MAX_CMD_PER_LUN;
4423 host->this_id = (int)eeprom->scsi_id;
4424 host->io_port = acb->io_port_base;
4425 host->n_io_port = acb->io_port_len;
4426 host->dma_channel = -1;
4427 host->unique_id = acb->io_port_base;
4428 host->irq = acb->irq_level;
4429 acb->last_reset = jiffies;
4430
4431 host->max_id = 16;
4432 if (host->max_id - 1 == eeprom->scsi_id)
4433 host->max_id--;
4434
4435 if (eeprom->channel_cfg & NAC_SCANLUN)
4436 host->max_lun = 8;
4437 else
4438 host->max_lun = 1;
4439}
4440
4441
4442
4443
4444
4445
4446
4447
4448
4449
4450
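/*
 * adapter_init_chip - reset the TRM-S1040 SCSI and DMA modules, detect
 * whether this is a wide card and, if the EEPROM requests it, perform
 * the power-on SCSI bus reset.
 */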
4451static void adapter_init_chip(struct AdapterCtlBlk *acb)
4452{
4453 struct NvRamType *eeprom = &acb->eeprom;
4454
4455
4456 DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
4457 DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
4458
4459
4460 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
4461
4462
4463 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
4464 udelay(20);
4465
4466
4467 acb->config = HCC_AUTOTERM | HCC_PARITY;
4468 if (DC395x_read8(acb, TRM_S1040_GEN_STATUS) & WIDESCSI)
4469 acb->config |= HCC_WIDE_CARD;
4470
4471 if (eeprom->channel_cfg & NAC_POWERON_SCSI_RESET)
4472 acb->config |= HCC_SCSI_RESET;
4473
4474 if (acb->config & HCC_SCSI_RESET) {
4475 dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n");
4476 DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
4477
4478
4479
4480 udelay(500);
4481
4482 acb->last_reset =
4483 jiffies + HZ / 2 +
4484 HZ * acb->eeprom.delay_time;
4485
4486
4487 }
4488}
4489
4490
4491
4492
4493
4494
4495
4496
4497
4498
4499
4500
4501
4502
4503
4504static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port,
4505 u32 io_port_len, unsigned int irq)
4506{
4507 if (!request_region(io_port, io_port_len, DC395X_NAME)) {
4508 dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port);
4509 goto failed;
4510 }
4511
4512 acb->io_port_base = io_port;
4513 acb->io_port_len = io_port_len;
4514
4515 if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) {
4516
4517 dprintkl(KERN_INFO, "Failed to register IRQ\n");
4518 goto failed;
4519 }
4520
4521 acb->irq_level = irq;
4522
4523
4524 check_eeprom(&acb->eeprom, io_port);
4525 print_eeprom_settings(&acb->eeprom);
4526
4527
4528 adapter_init_params(acb);
4529
4530
4531 adapter_print_config(acb);
4532
4533 if (adapter_sg_tables_alloc(acb)) {
4534 dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n");
4535 goto failed;
4536 }
4537 adapter_init_scsi_host(acb->scsi_host);
4538 adapter_init_chip(acb);
4539 set_basic_config(acb);
4540
4541 dprintkdbg(DBG_0,
4542 "adapter_init: acb=%p, pdcb_map=%p psrb_array=%p "
4543 "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
4544 acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk),
4545 sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk));
4546 return 0;
4547
4548failed:
4549 if (acb->irq_level)
4550 free_irq(acb->irq_level, acb);
4551 if (acb->io_port_base)
4552 release_region(acb->io_port_base, acb->io_port_len);
4553 adapter_sg_tables_free(acb);
4554
4555 return 1;
4556}
4557
4558
4559
4560
4561
4562
4563
4564
4565
4566static void adapter_uninit_chip(struct AdapterCtlBlk *acb)
4567{
4568
4569 DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0);
4570 DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0);
4571
4572
4573 if (acb->config & HCC_SCSI_RESET)
4574 reset_scsi_bus(acb);
4575
4576
4577 DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
4578}
4579
4580
4581
4582
4583
4584
4585
4586
4587
4588
static void adapter_uninit(struct AdapterCtlBlk *acb)
{
	unsigned long flags;
	DC395x_LOCK_IO(acb->scsi_host, flags);

	/* remove timers */
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);
	if (timer_pending(&acb->selto_timer))
		del_timer(&acb->selto_timer);

	adapter_uninit_chip(acb);
	adapter_remove_and_free_all_devices(acb);
	DC395x_UNLOCK_IO(acb->scsi_host, flags);

	if (acb->irq_level)
		free_irq(acb->irq_level, acb);
	if (acb->io_port_base)
		release_region(acb->io_port_base, acb->io_port_len);

	adapter_sg_tables_free(acb);
}


#undef SPRINTF
#define SPRINTF(args...) seq_printf(m, ##args)

#undef YESNO
#define YESNO(YN) \
	if (YN) SPRINTF(" Yes ");\
	else SPRINTF(" No ")
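
/**
 * dc395x_show_info - /proc/scsi/dc395x seq_file output: prints the adapter
 * settings, the map of attached LUNs and the negotiated parameters and
 * request queues of every known device.
 *
 * @m: seq_file to print into
 * @host: SCSI host the information is requested for
 **/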
static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
	int spd, spd1;
	struct DeviceCtlBlk *dcb;
	unsigned long flags;
	int dev;

	SPRINTF(DC395X_BANNER " PCI SCSI Host Adapter\n");
	SPRINTF(" Driver Version " DC395X_VERSION "\n");

	DC395x_LOCK_IO(acb->scsi_host, flags);

	SPRINTF("SCSI Host Nr %i, ", host->host_no);
	SPRINTF("DC395U/UW/F DC315/U %s\n",
		(acb->config & HCC_WIDE_CARD) ? "Wide" : "");
	SPRINTF("io_port_base 0x%04lx, ", acb->io_port_base);
	SPRINTF("irq_level 0x%04x, ", acb->irq_level);
	SPRINTF(" SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);

	SPRINTF("MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun);
	SPRINTF("AdapterID %i\n", host->this_id);

	SPRINTF("tag_max_num %i", acb->tag_max_num);
	SPRINTF(", FilterCfg 0x%02x",
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1));
	SPRINTF(", DelayReset %is\n", acb->eeprom.delay_time);

	SPRINTF("Nr of DCBs: %i\n", list_size(&acb->dcb_list));
	SPRINTF("Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
		acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2],
		acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5],
		acb->dcb_map[6], acb->dcb_map[7]);
	SPRINTF(" %02x %02x %02x %02x %02x %02x %02x %02x\n",
		acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10],
		acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13],
		acb->dcb_map[14], acb->dcb_map[15]);

	SPRINTF("Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");

	dev = 0;
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		int nego_period;
		SPRINTF("%02i %02i %02i ", dev, dcb->target_id,
			dcb->target_lun);
		YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK);
		YESNO(dcb->sync_offset);
		YESNO(dcb->sync_period & WIDE_SYNC);
		YESNO(dcb->dev_mode & NTC_DO_DISCONNECT);
		YESNO(dcb->dev_mode & NTC_DO_SEND_START);
		YESNO(dcb->sync_mode & EN_TAG_QUEUEING);
		nego_period = clock_period[dcb->sync_period & 0x07] << 2;
		if (dcb->sync_offset)
			SPRINTF(" %03i ns ", nego_period);
		else
			SPRINTF(" (%03i ns)", (dcb->min_nego_period << 2));

		if (dcb->sync_offset & 0x0f) {
			spd = 1000 / (nego_period);
			spd1 = 1000 % (nego_period);
			spd1 = (spd1 * 10 + nego_period / 2) / (nego_period);
			SPRINTF(" %2i.%1i M %02i ", spd, spd1,
				(dcb->sync_offset & 0x0f));
		} else
			SPRINTF(" ");

		SPRINTF(" %02i\n", dcb->max_command);
		dev++;
	}

	if (timer_pending(&acb->waiting_timer))
		SPRINTF("Waiting queue timer running\n");
	else
		SPRINTF("\n");

	list_for_each_entry(dcb, &acb->dcb_list, list) {
		struct ScsiReqBlk *srb;
		if (!list_empty(&dcb->srb_waiting_list))
			SPRINTF("DCB (%02i-%i): Waiting: %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_waiting_list));
		list_for_each_entry(srb, &dcb->srb_waiting_list, list)
			SPRINTF(" %p", srb->cmd);
		if (!list_empty(&dcb->srb_going_list))
			SPRINTF("\nDCB (%02i-%i): Going : %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_going_list));
		list_for_each_entry(srb, &dcb->srb_going_list, list)
			SPRINTF(" %p", srb->cmd);
		if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
			SPRINTF("\n");
	}

	if (debug_enabled(DBG_1)) {
		SPRINTF("DCB list for ACB %p:\n", acb);
		list_for_each_entry(dcb, &acb->dcb_list, list) {
			SPRINTF("%p -> ", dcb);
		}
		SPRINTF("END\n");
	}

	DC395x_UNLOCK_IO(acb->scsi_host, flags);
	return 0;
}
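
/*
 * The SCSI host template: describes this driver's capabilities and entry
 * points to the SCSI midlayer.
 */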
static struct scsi_host_template dc395x_driver_template = {
	.module = THIS_MODULE,
	.proc_name = DC395X_NAME,
	.show_info = dc395x_show_info,
	.name = DC395X_BANNER " " DC395X_VERSION,
	.queuecommand = dc395x_queue_command,
	.bios_param = dc395x_bios_param,
	.slave_alloc = dc395x_slave_alloc,
	.slave_destroy = dc395x_slave_destroy,
	.can_queue = DC395x_MAX_CAN_QUEUE,
	.this_id = 7,
	.sg_tablesize = DC395x_MAX_SG_TABLESIZE,
	.cmd_per_lun = DC395x_MAX_CMD_PER_LUN,
	.eh_abort_handler = dc395x_eh_abort,
	.eh_bus_reset_handler = dc395x_eh_bus_reset,
	.use_clustering = DISABLE_CLUSTERING,
};
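
/*
 * banner_display - print the driver banner once, on the first probe.
 */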
static void banner_display(void)
{
	static int banner_done = 0;

	if (!banner_done) {
		dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION);
		banner_done = 1;
	}
}
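
/**
 * dc395x_init_one - PCI probe callback. Enables the PCI device, allocates
 * a Scsi_Host with an embedded AdapterCtlBlk, initialises the adapter and
 * registers the host with the SCSI midlayer.
 *
 * @dev: The matched PCI device.
 * @id: The matching entry of dc395x_pci_table.
 *
 * Returns 0 on success, -ENODEV on failure.
 **/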
static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct Scsi_Host *scsi_host = NULL;
	struct AdapterCtlBlk *acb = NULL;
	unsigned long io_port_base;
	unsigned int io_port_len;
	unsigned int irq;

	dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev));
	banner_display();

	if (pci_enable_device(dev)) {
		dprintkl(KERN_INFO, "PCI Enable device failed.\n");
		return -ENODEV;
	}
	io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK;
	io_port_len = pci_resource_len(dev, 0);
	irq = dev->irq;
	dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq);

	/* allocate the scsi host, which includes our adapter control block */
	scsi_host = scsi_host_alloc(&dc395x_driver_template,
				    sizeof(struct AdapterCtlBlk));
	if (!scsi_host) {
		dprintkl(KERN_INFO, "scsi_host_alloc failed\n");
		goto fail;
	}
	acb = (struct AdapterCtlBlk *)scsi_host->hostdata;
	acb->scsi_host = scsi_host;
	acb->dev = dev;

	/* initialise the adapter: IO region, IRQ, EEPROM, SG tables, chip */
	if (adapter_init(acb, io_port_base, io_port_len, irq)) {
		dprintkl(KERN_INFO, "adapter init failed\n");
		goto fail;
	}

	pci_set_master(dev);

	/* register with the midlayer and scan the bus for devices */
	if (scsi_add_host(scsi_host, &dev->dev)) {
		dprintkl(KERN_ERR, "scsi_add_host failed\n");
		goto fail;
	}
	pci_set_drvdata(dev, scsi_host);
	scsi_scan_host(scsi_host);

	return 0;

fail:
	if (acb != NULL)
		adapter_uninit(acb);
	if (scsi_host != NULL)
		scsi_host_put(scsi_host);
	pci_disable_device(dev);
	return -ENODEV;
}
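
/**
 * dc395x_remove_one - PCI remove callback. Unregisters the host from the
 * SCSI midlayer and releases the adapter's resources.
 *
 * @dev: The PCI device being removed.
 **/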
static void dc395x_remove_one(struct pci_dev *dev)
{
	struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata);

	dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb);

	scsi_remove_host(scsi_host);
	adapter_uninit(acb);
	pci_disable_device(dev);
	scsi_host_put(scsi_host);
}
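
/*
 * PCI devices handled by this driver: any board based on the Tekram
 * TRM-S1040 ASIC.
 */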
static struct pci_device_id dc395x_pci_table[] = {
	{
		.vendor = PCI_VENDOR_ID_TEKRAM,
		.device = PCI_DEVICE_ID_TEKRAM_TRMS1040,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{}
};
MODULE_DEVICE_TABLE(pci, dc395x_pci_table);


static struct pci_driver dc395x_driver = {
	.name = DC395X_NAME,
	.id_table = dc395x_pci_table,
	.probe = dc395x_init_one,
	.remove = dc395x_remove_one,
};
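
/*
 * dc395x_module_init - register this driver with the PCI subsystem.
 */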
static int __init dc395x_module_init(void)
{
	return pci_register_driver(&dc395x_driver);
}
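
/*
 * dc395x_module_exit - unregister this driver from the PCI subsystem.
 */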
static void __exit dc395x_module_exit(void)
{
	pci_unregister_driver(&dc395x_driver);
}


module_init(dc395x_module_init);
module_exit(dc395x_module_exit);

MODULE_AUTHOR("C.L. Huang / Erich Chen / Kurt Garloff");
MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series");
MODULE_LICENSE("GPL");