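/*
 * dc395x.c - SCSI host adapter driver for the Tekram DC395(U/UW/F) and
 * DC315(U) boards built around the TRM-S1040 ASIC.
 */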
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <asm/io.h>

#include <scsi/scsi.h>
#include <scsi/scsicam.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "dc395x.h"

#define DC395X_NAME "dc395x"
#define DC395X_BANNER "Tekram DC395(U/UW/F), DC315(U) - ASIC TRM-S1040"
#define DC395X_VERSION "v2.05, 2004/03/08"
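
/*
 * Debug message classes; define DEBUG_MASK as an OR of these to enable
 * the corresponding dprintkdbg()/debug_enabled() output below.
 */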
#define DBG_KG		0x0001
#define DBG_0		0x0002
#define DBG_1		0x0004
#define DBG_SG		0x0020
#define DBG_FIFO	0x0040
#define DBG_PIO		0x0080
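
/*
 * dprintkl: printk() wrapper that prefixes every message with the
 * driver name.
 */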
#define dprintkl(level, format, arg...) \
	printk(level DC395X_NAME ": " format , ## arg)


#ifdef DEBUG_MASK
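/*
 * dprintkdbg: emit a KERN_DEBUG message only when 'type' is enabled in
 * DEBUG_MASK; debug_enabled() tests the same mask.  Both collapse to
 * no-ops when DEBUG_MASK is not defined (see the #else branch below).
 */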
#define dprintkdbg(type, format, arg...) \
	do { \
		if ((type) & (DEBUG_MASK)) \
			dprintkl(KERN_DEBUG , format , ## arg); \
	} while (0)

#define debug_enabled(type)	((DEBUG_MASK) & (type))

#else

#define dprintkdbg(type, format, arg...) \
	do {} while (0)
#define debug_enabled(type)	(0)

#endif
143
144
145#ifndef PCI_VENDOR_ID_TEKRAM
146#define PCI_VENDOR_ID_TEKRAM 0x1DE1
147#endif
148#ifndef PCI_DEVICE_ID_TEKRAM_TRMS1040
149#define PCI_DEVICE_ID_TEKRAM_TRMS1040 0x0391
150#endif
151
152
153#define DC395x_LOCK_IO(dev,flags) spin_lock_irqsave(((struct Scsi_Host *)dev)->host_lock, flags)
154#define DC395x_UNLOCK_IO(dev,flags) spin_unlock_irqrestore(((struct Scsi_Host *)dev)->host_lock, flags)
155
156#define DC395x_read8(acb,address) (u8)(inb(acb->io_port_base + (address)))
157#define DC395x_read16(acb,address) (u16)(inw(acb->io_port_base + (address)))
158#define DC395x_read32(acb,address) (u32)(inl(acb->io_port_base + (address)))
159#define DC395x_write8(acb,address,value) outb((value), acb->io_port_base + (address))
160#define DC395x_write16(acb,address,value) outw((value), acb->io_port_base + (address))
161#define DC395x_write32(acb,address,value) outl((value), acb->io_port_base + (address))
162
163
164#define RES_TARGET 0x000000FF
165#define RES_TARGET_LNX STATUS_MASK
166#define RES_ENDMSG 0x0000FF00
167#define RES_DID 0x00FF0000
168#define RES_DRV 0xFF000000
169
170#define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
171#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)
172
173#define SET_RES_TARGET(who,tgt) { who &= ~RES_TARGET; who |= (int)(tgt); }
174#define SET_RES_TARGET_LNX(who,tgt) { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; }
175#define SET_RES_MSG(who,msg) { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; }
176#define SET_RES_DID(who,did) { who &= ~RES_DID; who |= (int)(did) << 16; }
177#define SET_RES_DRV(who,drv) { who &= ~RES_DRV; who |= (int)(drv) << 24; }
178
179#define TAG_NONE 255
180
181
182
183
184
185
186#define SEGMENTX_LEN (sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
187
188
189struct SGentry {
190 u32 address;
191 u32 length;
192};
193
194
195struct NVRamTarget {
196 u8 cfg0;
197 u8 period;
198 u8 cfg2;
199 u8 cfg3;
200};
201
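/*
 * NvRamType: image of the adapter's serial EEPROM contents - global setup
 * plus one NVRamTarget entry per SCSI ID.
 */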
202struct NvRamType {
203 u8 sub_vendor_id[2];
204 u8 sub_sys_id[2];
205 u8 sub_class;
206 u8 vendor_id[2];
207 u8 device_id[2];
208 u8 reserved;
209 struct NVRamTarget target[DC395x_MAX_SCSI_ID];
210
211
212
213
214
215
216 u8 scsi_id;
217 u8 channel_cfg;
218 u8 delay_time;
219 u8 max_tag;
220 u8 reserved0;
221 u8 boot_target;
222 u8 boot_lun;
223 u8 reserved1;
224 u16 reserved2[22];
225 u16 cksum;
226};
227
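/*
 * ScsiReqBlk (SRB): per-command state for one scsi_cmnd, including the
 * scatter-gather shadow list handed to the chip and the message buffers.
 */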
228struct ScsiReqBlk {
229 struct list_head list;
230 struct DeviceCtlBlk *dcb;
231 struct scsi_cmnd *cmd;
232
233 struct SGentry *segment_x;
234 dma_addr_t sg_bus_addr;
235
236 u8 sg_count;
237 u8 sg_index;
238 size_t total_xfer_length;
239 size_t request_length;
240
241
242
243
244
245
246
247
248
249 size_t xferred;
250
251 u16 state;
252
253 u8 msgin_buf[6];
254 u8 msgout_buf[6];
255
256 u8 adapter_status;
257 u8 target_status;
258 u8 msg_count;
259 u8 end_message;
260
261 u8 tag_number;
262 u8 status;
263 u8 retry_count;
264 u8 flag;
265
266 u8 scsi_phase;
267};
268
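/*
 * DeviceCtlBlk (DCB): per target/LUN state - waiting and active request
 * lists, tag bookkeeping and the results of sync/wide negotiation.
 */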
269struct DeviceCtlBlk {
270 struct list_head list;
271 struct AdapterCtlBlk *acb;
272 struct list_head srb_going_list;
273 struct list_head srb_waiting_list;
274
275 struct ScsiReqBlk *active_srb;
276 u32 tag_mask;
277
278 u16 max_command;
279
280 u8 target_id;
281 u8 target_lun;
282 u8 identify_msg;
283 u8 dev_mode;
284
285 u8 inquiry7;
286 u8 sync_mode;
287 u8 min_nego_period;
288 u8 sync_period;
289
290 u8 sync_offset;
291 u8 flag;
292 u8 dev_type;
293 u8 init_tcq_flag;
294};
295
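/*
 * AdapterCtlBlk (ACB): per-adapter state, kept in the Scsi_Host
 * hostdata area.
 */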
296struct AdapterCtlBlk {
297 struct Scsi_Host *scsi_host;
298
299 unsigned long io_port_base;
300 unsigned long io_port_len;
301
302 struct list_head dcb_list;
303 struct DeviceCtlBlk *dcb_run_robin;
304 struct DeviceCtlBlk *active_dcb;
305
306 struct list_head srb_free_list;
307 struct ScsiReqBlk *tmp_srb;
308 struct timer_list waiting_timer;
309 struct timer_list selto_timer;
310
311 unsigned long last_reset;
312
313 u16 srb_count;
314
315 u8 sel_timeout;
316
317 unsigned int irq_level;
318 u8 tag_max_num;
319 u8 acb_flag;
320 u8 gmode2;
321
322 u8 config;
323 u8 lun_chk;
324 u8 scan_devices;
325 u8 hostid_bit;
326
327 u8 dcb_map[DC395x_MAX_SCSI_ID];
328 struct DeviceCtlBlk *children[DC395x_MAX_SCSI_ID][32];
329
330 struct pci_dev *dev;
331
332 u8 msg_len;
333
334 struct ScsiReqBlk srb_array[DC395x_MAX_SRB_CNT];
335 struct ScsiReqBlk srb;
336
337 struct NvRamType eeprom;
338};
339
340
341
342
343
344static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
345 u16 *pscsi_status);
346static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
347 u16 *pscsi_status);
348static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
349 u16 *pscsi_status);
350static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
351 u16 *pscsi_status);
352static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
353 u16 *pscsi_status);
354static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
355 u16 *pscsi_status);
356static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
357 u16 *pscsi_status);
358static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
359 u16 *pscsi_status);
360static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
361 u16 *pscsi_status);
362static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
363 u16 *pscsi_status);
364static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
365 u16 *pscsi_status);
366static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
367 u16 *pscsi_status);
368static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
369 u16 *pscsi_status);
370static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
371 u16 *pscsi_status);
372static void set_basic_config(struct AdapterCtlBlk *acb);
373static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
374 struct ScsiReqBlk *srb);
375static void reset_scsi_bus(struct AdapterCtlBlk *acb);
376static void data_io_transfer(struct AdapterCtlBlk *acb,
377 struct ScsiReqBlk *srb, u16 io_dir);
378static void disconnect(struct AdapterCtlBlk *acb);
379static void reselect(struct AdapterCtlBlk *acb);
380static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
381 struct ScsiReqBlk *srb);
382static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
383 struct ScsiReqBlk *srb);
384static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
385 struct ScsiReqBlk *srb);
386static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_code,
387 struct scsi_cmnd *cmd, u8 force);
388static void scsi_reset_detect(struct AdapterCtlBlk *acb);
389static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb);
390static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
391 struct ScsiReqBlk *srb);
392static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
393 struct ScsiReqBlk *srb);
394static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
395 struct ScsiReqBlk *srb);
396static void set_xfer_rate(struct AdapterCtlBlk *acb,
397 struct DeviceCtlBlk *dcb);
398static void waiting_timeout(struct timer_list *t);
399
400
401
402
403
404static u16 current_sync_offset = 0;
405
406static void *dc395x_scsi_phase0[] = {
407 data_out_phase0,
408 data_in_phase0,
409 command_phase0,
410 status_phase0,
411 nop0,
412 nop0,
413 msgout_phase0,
414 msgin_phase0,
415};
416
417static void *dc395x_scsi_phase1[] = {
418 data_out_phase1,
419 data_in_phase1,
420 command_phase1,
421 status_phase1,
422 nop1,
423 nop1,
424 msgout_phase1,
425 msgin_phase1,
426};
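
/*
 * Minimum transfer period (in 4 ns units) and bus frequency (in 100 kHz
 * units) for each of the eight EEPROM speed settings (0 = 20 MHz ...
 * 7 = 4 MHz, see the max_speed module parameter below).
 */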
450static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 };
451static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 };
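
/* Indices into the cfg_data[] module parameter table below. */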
467#define CFG_ADAPTER_ID 0
468#define CFG_MAX_SPEED 1
469#define CFG_DEV_MODE 2
470#define CFG_ADAPTER_MODE 3
471#define CFG_TAGS 4
472#define CFG_RESET_DELAY 5
473
474#define CFG_NUM 6
475
476
477
478
479
480
481#define CFG_PARAM_UNSET -1
482
483
484
485
486
487struct ParameterData {
488 int value;
489 int min;
490 int max;
491 int def;
492 int safe;
493};
494static struct ParameterData cfg_data[] = {
495 {
496 CFG_PARAM_UNSET,
497 0,
498 15,
499 7,
500 7
501 },
502 {
503 CFG_PARAM_UNSET,
504 0,
505 7,
506 1,
507 4,
508 },
509 {
510 CFG_PARAM_UNSET,
511 0,
512 0x3f,
513 NTC_DO_PARITY_CHK | NTC_DO_DISCONNECT | NTC_DO_SYNC_NEGO |
514 NTC_DO_WIDE_NEGO | NTC_DO_TAG_QUEUEING |
515 NTC_DO_SEND_START,
516 NTC_DO_PARITY_CHK | NTC_DO_SEND_START
517 },
518 {
519 CFG_PARAM_UNSET,
520 0,
521 0x2f,
522 NAC_SCANLUN |
523 NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET
524 ,
525 NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08
526 },
527 {
528 CFG_PARAM_UNSET,
529 0,
530 5,
531 3,
532 2,
533 },
534 {
535 CFG_PARAM_UNSET,
536 0,
537 180,
538 1,
539 10,
540 }
541};
542
543
544
545
546
547
548
static bool use_safe_settings = false;
module_param_named(safe, use_safe_settings, bool, 0);
MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false");
552
553
554module_param_named(adapter_id, cfg_data[CFG_ADAPTER_ID].value, int, 0);
555MODULE_PARM_DESC(adapter_id, "Adapter SCSI ID. Default 7 (0-15)");
556
557module_param_named(max_speed, cfg_data[CFG_MAX_SPEED].value, int, 0);
558MODULE_PARM_DESC(max_speed, "Maximum bus speed. Default 1 (0-7) Speeds: 0=20, 1=13.3, 2=10, 3=8, 4=6.7, 5=5.8, 6=5, 7=4 Mhz");
559
560module_param_named(dev_mode, cfg_data[CFG_DEV_MODE].value, int, 0);
561MODULE_PARM_DESC(dev_mode, "Device mode.");
562
563module_param_named(adapter_mode, cfg_data[CFG_ADAPTER_MODE].value, int, 0);
564MODULE_PARM_DESC(adapter_mode, "Adapter mode.");
565
566module_param_named(tags, cfg_data[CFG_TAGS].value, int, 0);
567MODULE_PARM_DESC(tags, "Number of tags (1<<x). Default 3 (0-5)");
568
569module_param_named(reset_delay, cfg_data[CFG_RESET_DELAY].value, int, 0);
570MODULE_PARM_DESC(reset_delay, "Reset delay in seconds. Default 1 (0-180)");
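
/*
 * Example (hypothetical values): the parameters above can be given at load
 * time, e.g. "modprobe dc395x adapter_id=7 tags=4 safe=0".  Values outside
 * the documented ranges are replaced by their defaults in fix_settings().
 */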
571
572
573
574
575
576
577static void set_safe_settings(void)
578{
579 if (use_safe_settings)
580 {
581 int i;
582
583 dprintkl(KERN_INFO, "Using safe settings.\n");
584 for (i = 0; i < CFG_NUM; i++)
585 {
586 cfg_data[i].value = cfg_data[i].safe;
587 }
588 }
589}
590
591
592
593
594
595
596static void fix_settings(void)
597{
598 int i;
599
600 dprintkdbg(DBG_1,
601 "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x "
602 "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n",
603 cfg_data[CFG_ADAPTER_ID].value,
604 cfg_data[CFG_MAX_SPEED].value,
605 cfg_data[CFG_DEV_MODE].value,
606 cfg_data[CFG_ADAPTER_MODE].value,
607 cfg_data[CFG_TAGS].value,
608 cfg_data[CFG_RESET_DELAY].value);
609 for (i = 0; i < CFG_NUM; i++)
610 {
611 if (cfg_data[i].value < cfg_data[i].min
612 || cfg_data[i].value > cfg_data[i].max)
613 cfg_data[i].value = cfg_data[i].def;
614 }
615}
616
617
618
619
620
621
622
623static char eeprom_index_to_delay_map[] =
624 { 1, 3, 5, 10, 16, 30, 60, 120 };
625
626
627
628
629
630
631
632
633static void eeprom_index_to_delay(struct NvRamType *eeprom)
634{
635 eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time];
636}
637
638
639
640
641
642
643
644
645
646static int delay_to_eeprom_index(int delay)
647{
648 u8 idx = 0;
649 while (idx < 7 && eeprom_index_to_delay_map[idx] < delay)
650 idx++;
651 return idx;
652}
653
654
655
656
657
658
659
660
661
662static void eeprom_override(struct NvRamType *eeprom)
663{
664 u8 id;
665
666
667 if (cfg_data[CFG_ADAPTER_ID].value != CFG_PARAM_UNSET)
668 eeprom->scsi_id = (u8)cfg_data[CFG_ADAPTER_ID].value;
669
670 if (cfg_data[CFG_ADAPTER_MODE].value != CFG_PARAM_UNSET)
671 eeprom->channel_cfg = (u8)cfg_data[CFG_ADAPTER_MODE].value;
672
673 if (cfg_data[CFG_RESET_DELAY].value != CFG_PARAM_UNSET)
674 eeprom->delay_time = delay_to_eeprom_index(
675 cfg_data[CFG_RESET_DELAY].value);
676
677 if (cfg_data[CFG_TAGS].value != CFG_PARAM_UNSET)
678 eeprom->max_tag = (u8)cfg_data[CFG_TAGS].value;
679
680
681 for (id = 0; id < DC395x_MAX_SCSI_ID; id++) {
682 if (cfg_data[CFG_DEV_MODE].value != CFG_PARAM_UNSET)
683 eeprom->target[id].cfg0 =
684 (u8)cfg_data[CFG_DEV_MODE].value;
685
686 if (cfg_data[CFG_MAX_SPEED].value != CFG_PARAM_UNSET)
687 eeprom->target[id].period =
688 (u8)cfg_data[CFG_MAX_SPEED].value;
689
690 }
691}
692
693
694
695
696
697static unsigned int list_size(struct list_head *head)
698{
699 unsigned int count = 0;
700 struct list_head *pos;
701 list_for_each(pos, head)
702 count++;
703 return count;
704}
705
706
707static struct DeviceCtlBlk *dcb_get_next(struct list_head *head,
708 struct DeviceCtlBlk *pos)
709{
710 int use_next = 0;
711 struct DeviceCtlBlk* next = NULL;
712 struct DeviceCtlBlk* i;
713
714 if (list_empty(head))
715 return NULL;
716
717
718 list_for_each_entry(i, head, list)
719 if (use_next) {
720 next = i;
721 break;
722 } else if (i == pos) {
723 use_next = 1;
724 }
725
726 if (!next)
727 list_for_each_entry(i, head, list) {
728 next = i;
729 break;
730 }
731
732 return next;
733}
734
735
736static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
737{
738 if (srb->tag_number < 255) {
739 dcb->tag_mask &= ~(1 << srb->tag_number);
740 srb->tag_number = 255;
741 }
742}
743
744
745
746static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
747 struct list_head *head)
748{
749 struct ScsiReqBlk *i;
750 list_for_each_entry(i, head, list)
751 if (i->cmd == cmd)
752 return i;
753 return NULL;
754}
755
756
757static struct ScsiReqBlk *srb_get_free(struct AdapterCtlBlk *acb)
758{
759 struct list_head *head = &acb->srb_free_list;
760 struct ScsiReqBlk *srb = NULL;
761
762 if (!list_empty(head)) {
763 srb = list_entry(head->next, struct ScsiReqBlk, list);
764 list_del(head->next);
765 dprintkdbg(DBG_0, "srb_get_free: srb=%p\n", srb);
766 }
767 return srb;
768}
769
770
771static void srb_free_insert(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
772{
773 dprintkdbg(DBG_0, "srb_free_insert: srb=%p\n", srb);
774 list_add_tail(&srb->list, &acb->srb_free_list);
775}
776
777
778static void srb_waiting_insert(struct DeviceCtlBlk *dcb,
779 struct ScsiReqBlk *srb)
780{
781 dprintkdbg(DBG_0, "srb_waiting_insert: (0x%p) <%02i-%i> srb=%p\n",
782 srb->cmd, dcb->target_id, dcb->target_lun, srb);
783 list_add(&srb->list, &dcb->srb_waiting_list);
784}
785
786
787static void srb_waiting_append(struct DeviceCtlBlk *dcb,
788 struct ScsiReqBlk *srb)
789{
790 dprintkdbg(DBG_0, "srb_waiting_append: (0x%p) <%02i-%i> srb=%p\n",
791 srb->cmd, dcb->target_id, dcb->target_lun, srb);
792 list_add_tail(&srb->list, &dcb->srb_waiting_list);
793}
794
795
796static void srb_going_append(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
797{
798 dprintkdbg(DBG_0, "srb_going_append: (0x%p) <%02i-%i> srb=%p\n",
799 srb->cmd, dcb->target_id, dcb->target_lun, srb);
800 list_add_tail(&srb->list, &dcb->srb_going_list);
801}
802
803
804static void srb_going_remove(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
805{
806 struct ScsiReqBlk *i;
807 struct ScsiReqBlk *tmp;
808 dprintkdbg(DBG_0, "srb_going_remove: (0x%p) <%02i-%i> srb=%p\n",
809 srb->cmd, dcb->target_id, dcb->target_lun, srb);
810
811 list_for_each_entry_safe(i, tmp, &dcb->srb_going_list, list)
812 if (i == srb) {
813 list_del(&srb->list);
814 break;
815 }
816}
817
818
819static void srb_waiting_remove(struct DeviceCtlBlk *dcb,
820 struct ScsiReqBlk *srb)
821{
822 struct ScsiReqBlk *i;
823 struct ScsiReqBlk *tmp;
824 dprintkdbg(DBG_0, "srb_waiting_remove: (0x%p) <%02i-%i> srb=%p\n",
825 srb->cmd, dcb->target_id, dcb->target_lun, srb);
826
827 list_for_each_entry_safe(i, tmp, &dcb->srb_waiting_list, list)
828 if (i == srb) {
829 list_del(&srb->list);
830 break;
831 }
832}
833
834
835static void srb_going_to_waiting_move(struct DeviceCtlBlk *dcb,
836 struct ScsiReqBlk *srb)
837{
838 dprintkdbg(DBG_0,
839 "srb_going_to_waiting_move: (0x%p) <%02i-%i> srb=%p\n",
840 srb->cmd, dcb->target_id, dcb->target_lun, srb);
841 list_move(&srb->list, &dcb->srb_waiting_list);
842}
843
844
845static void srb_waiting_to_going_move(struct DeviceCtlBlk *dcb,
846 struct ScsiReqBlk *srb)
847{
848 dprintkdbg(DBG_0,
849 "srb_waiting_to_going_move: (0x%p) <%02i-%i> srb=%p\n",
850 srb->cmd, dcb->target_id, dcb->target_lun, srb);
851 list_move(&srb->list, &dcb->srb_going_list);
852}
853
854
855
856static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
857{
858 if (timer_pending(&acb->waiting_timer))
859 return;
860 if (time_before(jiffies + to, acb->last_reset - HZ / 2))
861 acb->waiting_timer.expires =
862 acb->last_reset - HZ / 2 + 1;
863 else
864 acb->waiting_timer.expires = jiffies + to + 1;
865 add_timer(&acb->waiting_timer);
866}
867
868
869
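/*
 * Scan the DCB list round-robin, starting at dcb_run_robin, and start the
 * first waiting SRB on a device that is not already running its maximum
 * number of commands; if starting fails, re-arm the waiting timer.
 */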
870static void waiting_process_next(struct AdapterCtlBlk *acb)
871{
872 struct DeviceCtlBlk *start = NULL;
873 struct DeviceCtlBlk *pos;
874 struct DeviceCtlBlk *dcb;
875 struct ScsiReqBlk *srb;
876 struct list_head *dcb_list_head = &acb->dcb_list;
877
878 if (acb->active_dcb
879 || (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV)))
880 return;
881
882 if (timer_pending(&acb->waiting_timer))
883 del_timer(&acb->waiting_timer);
884
885 if (list_empty(dcb_list_head))
886 return;
887
888
889
890
891
892 list_for_each_entry(dcb, dcb_list_head, list)
893 if (dcb == acb->dcb_run_robin) {
894 start = dcb;
895 break;
896 }
897 if (!start) {
898
899 start = list_entry(dcb_list_head->next, typeof(*start), list);
900 acb->dcb_run_robin = start;
901 }
902
903
904
905
906
907
908 pos = start;
909 do {
910 struct list_head *waiting_list_head = &pos->srb_waiting_list;
911
912
913 acb->dcb_run_robin = dcb_get_next(dcb_list_head,
914 acb->dcb_run_robin);
915
916 if (list_empty(waiting_list_head) ||
917 pos->max_command <= list_size(&pos->srb_going_list)) {
918
919 pos = dcb_get_next(dcb_list_head, pos);
920 } else {
921 srb = list_entry(waiting_list_head->next,
922 struct ScsiReqBlk, list);
923
924
925 if (!start_scsi(acb, pos, srb))
926 srb_waiting_to_going_move(pos, srb);
927 else
928 waiting_set_timer(acb, HZ/50);
929 break;
930 }
931 } while (pos != start);
932}
933
934
935
936static void waiting_timeout(struct timer_list *t)
937{
938 unsigned long flags;
939 struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer);
940 dprintkdbg(DBG_1,
941 "waiting_timeout: Queue woken up by timer. acb=%p\n", acb);
942 DC395x_LOCK_IO(acb->scsi_host, flags);
943 waiting_process_next(acb);
944 DC395x_UNLOCK_IO(acb->scsi_host, flags);
945}
946
947
948
949static struct DeviceCtlBlk *find_dcb(struct AdapterCtlBlk *acb, u8 id, u8 lun)
950{
951 return acb->children[id][lun];
952}
953
954
955
956static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
957{
958 struct DeviceCtlBlk *dcb = srb->dcb;
959
960 if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
961 acb->active_dcb ||
962 (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
963 srb_waiting_append(dcb, srb);
964 waiting_process_next(acb);
965 return;
966 }
967
968 if (!start_scsi(acb, dcb, srb))
969 srb_going_append(dcb, srb);
970 else {
971 srb_waiting_insert(dcb, srb);
972 waiting_set_timer(acb, HZ / 50);
973 }
974}
975
976
977static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
978 struct ScsiReqBlk *srb)
979{
980 int nseg;
981 enum dma_data_direction dir = cmd->sc_data_direction;
982 dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n",
983 cmd, dcb->target_id, dcb->target_lun);
984
985 srb->dcb = dcb;
986 srb->cmd = cmd;
987 srb->sg_count = 0;
988 srb->total_xfer_length = 0;
989 srb->sg_bus_addr = 0;
990 srb->sg_index = 0;
991 srb->adapter_status = 0;
992 srb->target_status = 0;
993 srb->msg_count = 0;
994 srb->status = 0;
995 srb->flag = 0;
996 srb->state = 0;
997 srb->retry_count = 0;
998 srb->tag_number = TAG_NONE;
999 srb->scsi_phase = PH_BUS_FREE;
1000 srb->end_message = 0;
1001
1002 nseg = scsi_dma_map(cmd);
1003 BUG_ON(nseg < 0);
1004
1005 if (dir == PCI_DMA_NONE || !nseg) {
1006 dprintkdbg(DBG_0,
1007 "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
			scsi_bufflen(cmd), scsi_sglist(cmd), scsi_sg_count(cmd),
1009 srb->segment_x[0].address);
1010 } else {
1011 int i;
1012 u32 reqlen = scsi_bufflen(cmd);
1013 struct scatterlist *sg;
1014 struct SGentry *sgp = srb->segment_x;
1015
1016 srb->sg_count = nseg;
1017
1018 dprintkdbg(DBG_0,
1019 "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
1020 reqlen, scsi_sglist(cmd), scsi_sg_count(cmd),
1021 srb->sg_count);
1022
1023 scsi_for_each_sg(cmd, sg, srb->sg_count, i) {
1024 u32 busaddr = (u32)sg_dma_address(sg);
1025 u32 seglen = (u32)sg->length;
1026 sgp[i].address = busaddr;
1027 sgp[i].length = seglen;
1028 srb->total_xfer_length += seglen;
1029 }
1030 sgp += srb->sg_count - 1;
1031
1032
1033
1034
1035
1036 if (srb->total_xfer_length > reqlen) {
1037 sgp->length -= (srb->total_xfer_length - reqlen);
1038 srb->total_xfer_length = reqlen;
1039 }
1040
1041
1042 if (dcb->sync_period & WIDE_SYNC &&
1043 srb->total_xfer_length % 2) {
1044 srb->total_xfer_length++;
1045 sgp->length++;
1046 }
1047
1048 srb->sg_bus_addr = pci_map_single(dcb->acb->dev,
1049 srb->segment_x,
1050 SEGMENTX_LEN,
1051 PCI_DMA_TODEVICE);
1052
1053 dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
1054 srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
1055 }
1056
1057 srb->request_length = srb->total_xfer_length;
1058}
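
/*
 * queuecommand (locked variant): validate the target/LUN, grab a free SRB,
 * build it from the scsi_cmnd and either start it immediately or put it on
 * the DCB's waiting list.  Returns 1 when no free SRB is available so that
 * the command is retried later.
 */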
1080static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
1081{
1082 struct DeviceCtlBlk *dcb;
1083 struct ScsiReqBlk *srb;
1084 struct AdapterCtlBlk *acb =
1085 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
1086 dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n",
1087 cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]);
1088
1089
1090 cmd->result = DID_BAD_TARGET << 16;
1091
1092
	if (cmd->device->id >= acb->scsi_host->max_id ||
	    cmd->device->lun >= acb->scsi_host->max_lun ||
	    cmd->device->lun > 31) {
1096 goto complete;
1097 }
1098
1099
1100 if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) {
1101 dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n",
1102 cmd->device->id, (u8)cmd->device->lun);
1103 goto complete;
1104 }
1105
1106
1107 dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
1108 if (!dcb) {
1109
1110 dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>",
1111 cmd->device->id, (u8)cmd->device->lun);
1112 goto complete;
1113 }
1114
1115
1116 cmd->scsi_done = done;
1117 cmd->result = 0;
1118
1119 srb = srb_get_free(acb);
1120 if (!srb)
1121 {
1122
1123
1124
1125
1126 dprintkdbg(DBG_0, "queue_command: No free srb's\n");
1127 return 1;
1128 }
1129
1130 build_srb(cmd, dcb, srb);
1131
1132 if (!list_empty(&dcb->srb_waiting_list)) {
1133
1134 srb_waiting_append(dcb, srb);
1135 waiting_process_next(acb);
1136 } else {
1137
1138 send_srb(acb, srb);
1139 }
1140 dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd);
1141 return 0;
1142
1143complete:
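	/*
	 * The command cannot be queued (bad target/LUN); complete it
	 * immediately with the result code set above.
	 */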
1150 done(cmd);
1151 return 0;
1152}
1153
1154static DEF_SCSI_QCMD(dc395x_queue_command)
1155
1156
1157
1158
1159static int dc395x_bios_param(struct scsi_device *sdev,
1160 struct block_device *bdev, sector_t capacity, int *info)
1161{
1162#ifdef CONFIG_SCSI_DC395x_TRMS1040_TRADMAP
1163 int heads, sectors, cylinders;
1164 struct AdapterCtlBlk *acb;
1165 int size = capacity;
1166
1167 dprintkdbg(DBG_0, "dc395x_bios_param..............\n");
1168 acb = (struct AdapterCtlBlk *)sdev->host->hostdata;
1169 heads = 64;
1170 sectors = 32;
1171 cylinders = size / (heads * sectors);
1172
1173 if ((acb->gmode2 & NAC_GREATER_1G) && (cylinders > 1024)) {
1174 heads = 255;
1175 sectors = 63;
1176 cylinders = size / (heads * sectors);
1177 }
	info[0] = heads;
	info[1] = sectors;
	info[2] = cylinders;
1181 return 0;
1182#else
1183 return scsicam_bios_param(bdev, capacity, info);
1184#endif
1185}
1186
1187
1188static void dump_register_info(struct AdapterCtlBlk *acb,
1189 struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
1190{
1191 u16 pstat;
1192 struct pci_dev *dev = acb->dev;
1193 pci_read_config_word(dev, PCI_STATUS, &pstat);
1194 if (!dcb)
1195 dcb = acb->active_dcb;
1196 if (!srb && dcb)
1197 srb = dcb->active_srb;
1198 if (srb) {
1199 if (!srb->cmd)
1200 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
1201 srb, srb->cmd);
1202 else
1203 dprintkl(KERN_INFO, "dump: srb=%p cmd=%p "
1204 "cmnd=0x%02x <%02i-%i>\n",
1205 srb, srb->cmd,
1206 srb->cmd->cmnd[0], srb->cmd->device->id,
1207 (u8)srb->cmd->device->lun);
1208 printk(" sglist=%p cnt=%i idx=%i len=%zu\n",
1209 srb->segment_x, srb->sg_count, srb->sg_index,
1210 srb->total_xfer_length);
1211 printk(" state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n",
1212 srb->state, srb->status, srb->scsi_phase,
1213 (acb->active_dcb) ? "" : "not");
1214 }
1215 dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x "
1216 "signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x "
1217 "rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x "
1218 "config2=0x%02x cmd=0x%02x selto=0x%02x}\n",
1219 DC395x_read16(acb, TRM_S1040_SCSI_STATUS),
1220 DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
1221 DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL),
1222 DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS),
1223 DC395x_read8(acb, TRM_S1040_SCSI_SYNC),
1224 DC395x_read8(acb, TRM_S1040_SCSI_TARGETID),
1225 DC395x_read8(acb, TRM_S1040_SCSI_IDMSG),
1226 DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
1227 DC395x_read8(acb, TRM_S1040_SCSI_INTEN),
1228 DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0),
1229 DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2),
1230 DC395x_read8(acb, TRM_S1040_SCSI_COMMAND),
1231 DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT));
1232 dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x "
1233 "irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x "
1234 "ctctr=0x%08x addr=0x%08x:0x%08x}\n",
1235 DC395x_read16(acb, TRM_S1040_DMA_COMMAND),
1236 DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
1237 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
1238 DC395x_read8(acb, TRM_S1040_DMA_STATUS),
1239 DC395x_read8(acb, TRM_S1040_DMA_INTEN),
1240 DC395x_read16(acb, TRM_S1040_DMA_CONFIG),
1241 DC395x_read32(acb, TRM_S1040_DMA_XCNT),
1242 DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
1243 DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR),
1244 DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR));
1245 dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} "
1246 "pci{status=0x%04x}\n",
1247 DC395x_read8(acb, TRM_S1040_GEN_CONTROL),
1248 DC395x_read8(acb, TRM_S1040_GEN_STATUS),
1249 DC395x_read8(acb, TRM_S1040_GEN_TIMER),
1250 pstat);
1251}
1252
1253
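/*
 * Empty the SCSI FIFO; with DBG_FIFO debugging enabled, report how many
 * bytes were still queued in it.
 */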
1254static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt)
1255{
1256#if debug_enabled(DBG_FIFO)
1257 u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
1258 u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
1259 if (!(fifocnt & 0x40))
1260 dprintkdbg(DBG_FIFO,
1261 "clear_fifo: (%i bytes) on phase %02x in %s\n",
1262 fifocnt & 0x3f, lines, txt);
1263#endif
1264 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO);
1265}
1266
1267
1268static void reset_dev_param(struct AdapterCtlBlk *acb)
1269{
1270 struct DeviceCtlBlk *dcb;
1271 struct NvRamType *eeprom = &acb->eeprom;
1272 dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb);
1273
1274 list_for_each_entry(dcb, &acb->dcb_list, list) {
1275 u8 period_index;
1276
1277 dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE);
1278 dcb->sync_period = 0;
1279 dcb->sync_offset = 0;
1280
1281 dcb->dev_mode = eeprom->target[dcb->target_id].cfg0;
1282 period_index = eeprom->target[dcb->target_id].period & 0x07;
1283 dcb->min_nego_period = clock_period[period_index];
1284 if (!(dcb->dev_mode & NTC_DO_WIDE_NEGO)
1285 || !(acb->config & HCC_WIDE_CARD))
1286 dcb->sync_mode &= ~WIDE_NEGO_ENABLE;
1287 }
1288}
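
/*
 * Bus reset handler: reset the chip and the SCSI bus, restore per-device
 * settings from the EEPROM and complete all active commands with DID_RESET.
 */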
1296static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
1297{
1298 struct AdapterCtlBlk *acb =
1299 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
1300 dprintkl(KERN_INFO,
		"eh_bus_reset: (0x%p) target=<%02i-%i> cmd=%p\n",
1302 cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
1303
1304 if (timer_pending(&acb->waiting_timer))
1305 del_timer(&acb->waiting_timer);
1306
1307
1308
1309
1310 DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
1311 DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
1312 DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
1313 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
1314
1315 reset_scsi_bus(acb);
1316 udelay(500);
1317
1318
1319 acb->last_reset =
1320 jiffies + 3 * HZ / 2 +
1321 HZ * acb->eeprom.delay_time;
1322
1323
1324
1325
1326
1327 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
1328 clear_fifo(acb, "eh_bus_reset");
1329
1330 DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
1331 set_basic_config(acb);
1332
1333 reset_dev_param(acb);
1334 doing_srb_done(acb, DID_RESET, cmd, 0);
1335 acb->active_dcb = NULL;
1336 acb->acb_flag = 0;
1337 waiting_process_next(acb);
1338
1339 return SUCCESS;
1340}
1341
1342static int dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
1343{
1344 int rc;
1345
1346 spin_lock_irq(cmd->device->host->host_lock);
1347 rc = __dc395x_eh_bus_reset(cmd);
1348 spin_unlock_irq(cmd->device->host->host_lock);
1349
1350 return rc;
1351}
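
/*
 * Abort handler: a command still on the waiting list is unlinked and
 * completed with DID_ABORT; a command already on the bus is left alone
 * and FAILED is returned.
 */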
1358static int dc395x_eh_abort(struct scsi_cmnd *cmd)
1359{
1360
1361
1362
1363
1364 struct AdapterCtlBlk *acb =
1365 (struct AdapterCtlBlk *)cmd->device->host->hostdata;
1366 struct DeviceCtlBlk *dcb;
1367 struct ScsiReqBlk *srb;
1368 dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n",
1369 cmd, cmd->device->id, (u8)cmd->device->lun, cmd);
1370
1371 dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
1372 if (!dcb) {
1373 dprintkl(KERN_DEBUG, "eh_abort: No such device\n");
1374 return FAILED;
1375 }
1376
1377 srb = find_cmd(cmd, &dcb->srb_waiting_list);
1378 if (srb) {
1379 srb_waiting_remove(dcb, srb);
1380 pci_unmap_srb_sense(acb, srb);
1381 pci_unmap_srb(acb, srb);
1382 free_tag(dcb, srb);
1383 srb_free_insert(acb, srb);
1384 dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
1385 cmd->result = DID_ABORT << 16;
1386 return SUCCESS;
1387 }
1388 srb = find_cmd(cmd, &dcb->srb_going_list);
1389 if (srb) {
1390 dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n");
1391
1392 } else {
1393 dprintkl(KERN_DEBUG, "eh_abort: Command not found\n");
1394 }
1395 return FAILED;
1396}
1397
1398
1399
1400static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
1401 struct ScsiReqBlk *srb)
1402{
1403 u8 *ptr = srb->msgout_buf + srb->msg_count;
1404 if (srb->msg_count > 1) {
1405 dprintkl(KERN_INFO,
1406 "build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n",
1407 srb->msg_count, srb->msgout_buf[0],
1408 srb->msgout_buf[1]);
1409 return;
1410 }
1411 if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
1412 dcb->sync_offset = 0;
1413 dcb->min_nego_period = 200 >> 2;
1414 } else if (dcb->sync_offset == 0)
1415 dcb->sync_offset = SYNC_NEGO_OFFSET;
1416
1417 *ptr++ = MSG_EXTENDED;
1418 *ptr++ = 3;
1419 *ptr++ = EXTENDED_SDTR;
1420 *ptr++ = dcb->min_nego_period;
1421 *ptr++ = dcb->sync_offset;
1422 srb->msg_count += 5;
1423 srb->state |= SRB_DO_SYNC_NEGO;
1424}
1425
1426
1427
1428static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
1429 struct ScsiReqBlk *srb)
1430{
1431 u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
1432 (acb->config & HCC_WIDE_CARD)) ? 1 : 0;
1433 u8 *ptr = srb->msgout_buf + srb->msg_count;
1434 if (srb->msg_count > 1) {
1435 dprintkl(KERN_INFO,
1436 "build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n",
1437 srb->msg_count, srb->msgout_buf[0],
1438 srb->msgout_buf[1]);
1439 return;
1440 }
1441 *ptr++ = MSG_EXTENDED;
1442 *ptr++ = 2;
1443 *ptr++ = EXTENDED_WDTR;
1444 *ptr++ = wide;
1445 srb->msg_count += 4;
1446 srb->state |= SRB_DO_WIDE_NEGO;
1447}
1448
1449
1450#if 0
1451
1452
1453void selection_timeout_missed(unsigned long ptr);
1454
1455static void selto_timer(struct AdapterCtlBlk *acb)
1456{
1457 if (timer_pending(&acb->selto_timer))
1458 return;
1459 acb->selto_timer.function = selection_timeout_missed;
1460 acb->selto_timer.data = (unsigned long) acb;
1461 if (time_before
1462 (jiffies + HZ, acb->last_reset + HZ / 2))
1463 acb->selto_timer.expires =
1464 acb->last_reset + HZ / 2 + 1;
1465 else
1466 acb->selto_timer.expires = jiffies + HZ + 1;
1467 add_timer(&acb->selto_timer);
1468}
1469
1470
1471void selection_timeout_missed(unsigned long ptr)
1472{
1473 unsigned long flags;
1474 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
1475 struct ScsiReqBlk *srb;
1476 dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n");
1477 if (!acb->active_dcb || !acb->active_dcb->active_srb) {
1478 dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n");
1479 return;
1480 }
1481 DC395x_LOCK_IO(acb->scsi_host, flags);
1482 srb = acb->active_dcb->active_srb;
1483 disconnect(acb);
1484 DC395x_UNLOCK_IO(acb->scsi_host, flags);
1485}
1486#endif
1487
1488
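/*
 * Try to select the target and feed the identify/tag/command bytes into
 * the SCSI FIFO.  Returns 0 if selection was started, 1 if the chip or the
 * bus was busy and the caller should retry the SRB later.
 */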
1489static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1490 struct ScsiReqBlk* srb)
1491{
1492 u16 s_stat2, return_code;
1493 u8 s_stat, scsicommand, i, identify_message;
1494 u8 *ptr;
	dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
		srb->cmd, dcb->target_id, dcb->target_lun, srb);
1497
1498 srb->tag_number = TAG_NONE;
1499
1500 s_stat = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
1501 s_stat2 = 0;
1502 s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
1503#if 1
	if (s_stat & 0x20) {
		dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n",
			srb->cmd, s_stat, s_stat2);
1518 return 1;
1519 }
1520#endif
1521 if (acb->active_dcb) {
		dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a "
			"command while another command (0x%p) is active.",
			srb->cmd,
			acb->active_dcb->active_srb ?
			acb->active_dcb->active_srb->cmd : NULL);
1527 return 1;
1528 }
1529 if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
1530 dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd);
1531 return 1;
1532 }
1533
1534
1535 if (time_before(jiffies, acb->last_reset - HZ / 2)) {
1536 dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
1537 return 1;
1538 }
1539
1540
1541 clear_fifo(acb, "start_scsi");
1542 DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
1543 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
1544 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
1545 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
1546 srb->scsi_phase = PH_BUS_FREE;
1547
1548 identify_message = dcb->identify_msg;
1549
1550
1551 if (srb->flag & AUTO_REQSENSE)
1552 identify_message &= 0xBF;
1553
1554 if (((srb->cmd->cmnd[0] == INQUIRY)
1555 || (srb->cmd->cmnd[0] == REQUEST_SENSE)
1556 || (srb->flag & AUTO_REQSENSE))
1557 && (((dcb->sync_mode & WIDE_NEGO_ENABLE)
1558 && !(dcb->sync_mode & WIDE_NEGO_DONE))
1559 || ((dcb->sync_mode & SYNC_NEGO_ENABLE)
1560 && !(dcb->sync_mode & SYNC_NEGO_DONE)))
1561 && (dcb->target_lun == 0)) {
1562 srb->msgout_buf[0] = identify_message;
1563 srb->msg_count = 1;
1564 scsicommand = SCMD_SEL_ATNSTOP;
1565 srb->state = SRB_MSGOUT;
1566#ifndef SYNC_FIRST
1567 if (dcb->sync_mode & WIDE_NEGO_ENABLE
1568 && dcb->inquiry7 & SCSI_INQ_WBUS16) {
1569 build_wdtr(acb, dcb, srb);
1570 goto no_cmd;
1571 }
1572#endif
1573 if (dcb->sync_mode & SYNC_NEGO_ENABLE
1574 && dcb->inquiry7 & SCSI_INQ_SYNC) {
1575 build_sdtr(acb, dcb, srb);
1576 goto no_cmd;
1577 }
1578 if (dcb->sync_mode & WIDE_NEGO_ENABLE
1579 && dcb->inquiry7 & SCSI_INQ_WBUS16) {
1580 build_wdtr(acb, dcb, srb);
1581 goto no_cmd;
1582 }
1583 srb->msg_count = 0;
1584 }
1585
1586 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, identify_message);
1587
1588 scsicommand = SCMD_SEL_ATN;
1589 srb->state = SRB_START_;
1590#ifndef DC395x_NO_TAGQ
1591 if ((dcb->sync_mode & EN_TAG_QUEUEING)
1592 && (identify_message & 0xC0)) {
1593
1594 u32 tag_mask = 1;
1595 u8 tag_number = 0;
1596 while (tag_mask & dcb->tag_mask
1597 && tag_number < dcb->max_command) {
1598 tag_mask = tag_mask << 1;
1599 tag_number++;
1600 }
1601 if (tag_number >= dcb->max_command) {
1602 dprintkl(KERN_WARNING, "start_scsi: (0x%p) "
1603 "Out of tags target=<%02i-%i>)\n",
1604 srb->cmd, srb->cmd->device->id,
1605 (u8)srb->cmd->device->lun);
1606 srb->state = SRB_READY;
1607 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
1608 DO_HWRESELECT);
1609 return 1;
1610 }
1611
1612 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_SIMPLE_QTAG);
1613 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number);
1614 dcb->tag_mask |= tag_mask;
1615 srb->tag_number = tag_number;
1616 scsicommand = SCMD_SEL_ATN3;
1617 srb->state = SRB_START_;
1618 }
1619#endif
1620
1621
1622 dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n",
1623 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
1624 srb->cmd->cmnd[0], srb->tag_number);
1625 if (srb->flag & AUTO_REQSENSE) {
1626 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
1627 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
1628 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1629 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1630 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
1631 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1632 } else {
1633 ptr = (u8 *)srb->cmd->cmnd;
1634 for (i = 0; i < srb->cmd->cmd_len; i++)
1635 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
1636 }
1637 no_cmd:
1638 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
1639 DO_HWRESELECT | DO_DATALATCH);
1640 if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
1641
1642
1643
1644
1645
1646 dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n",
1647 srb->cmd, dcb->target_id, dcb->target_lun);
1648 srb->state = SRB_READY;
1649 free_tag(dcb, srb);
1650 srb->msg_count = 0;
1651 return_code = 1;
1652
1653 } else {
1654
1655
1656
1657
1658 srb->scsi_phase = PH_BUS_FREE;
1659 dcb->active_srb = srb;
1660 acb->active_dcb = dcb;
1661 return_code = 0;
1662
1663 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
1664 DO_DATALATCH | DO_HWRESELECT);
1665
1666 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, scsicommand);
1667 }
1668 return return_code;
1669}
1670
1671
1672#define DC395x_ENABLE_MSGOUT \
1673 DC395x_write16 (acb, TRM_S1040_SCSI_CONTROL, DO_SETATN); \
1674 srb->state |= SRB_MSGOUT
1675
1676
1677
1678static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
1679 struct ScsiReqBlk *srb)
1680{
1681 srb->msgout_buf[0] = ABORT;
1682 srb->msg_count = 1;
1683 DC395x_ENABLE_MSGOUT;
1684 srb->state &= ~SRB_MSGIN;
1685 srb->state |= SRB_MSGOUT;
1686}
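
/*
 * Main SCSI interrupt service: handle select/reselect/disconnect/reset
 * conditions directly, otherwise dispatch through the phase handler tables
 * for the phase just completed and the phase being entered.
 */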
1696static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
1697 u16 scsi_status)
1698{
1699 struct DeviceCtlBlk *dcb;
1700 struct ScsiReqBlk *srb;
1701 u16 phase;
1702 u8 scsi_intstatus;
1703 unsigned long flags;
1704 void (*dc395x_statev)(struct AdapterCtlBlk *, struct ScsiReqBlk *,
1705 u16 *);
1706
1707 DC395x_LOCK_IO(acb->scsi_host, flags);
1708
1709
1710 scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
1711 if ((scsi_status & 0x2007) == 0x2002)
1712 dprintkl(KERN_DEBUG,
1713 "COP after COP completed? %04x\n", scsi_status);
1714 if (debug_enabled(DBG_KG)) {
1715 if (scsi_intstatus & INT_SELTIMEOUT)
1716 dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n");
1717 }
1718
1719
1720 if (timer_pending(&acb->selto_timer))
1721 del_timer(&acb->selto_timer);
1722
1723 if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) {
1724 disconnect(acb);
1725 goto out_unlock;
1726 }
1727 if (scsi_intstatus & INT_RESELECTED) {
1728 reselect(acb);
1729 goto out_unlock;
1730 }
1731 if (scsi_intstatus & INT_SELECT) {
1732 dprintkl(KERN_INFO, "Host does not support target mode!\n");
1733 goto out_unlock;
1734 }
1735 if (scsi_intstatus & INT_SCSIRESET) {
1736 scsi_reset_detect(acb);
1737 goto out_unlock;
1738 }
1739 if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
1740 dcb = acb->active_dcb;
1741 if (!dcb) {
1742 dprintkl(KERN_DEBUG,
1743 "Oops: BusService (%04x %02x) w/o ActiveDCB!\n",
1744 scsi_status, scsi_intstatus);
1745 goto out_unlock;
1746 }
1747 srb = dcb->active_srb;
1748 if (dcb->flag & ABORT_DEV_) {
1749 dprintkdbg(DBG_0, "MsgOut Abort Device.....\n");
1750 enable_msgout_abort(acb, srb);
1751 }
1752
1753
1754 phase = (u16)srb->scsi_phase;
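
		/* Run the handler for the phase that has just completed... */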
1769 dc395x_statev = dc395x_scsi_phase0[phase];
1770 dc395x_statev(acb, srb, &scsi_status);
1777 srb->scsi_phase = scsi_status & PHASEMASK;
1778 phase = (u16)scsi_status & PHASEMASK;
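
		/* ...then set up for the phase the target switched to. */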
1792 dc395x_statev = dc395x_scsi_phase1[phase];
1793 dc395x_statev(acb, srb, &scsi_status);
1794 }
1795 out_unlock:
1796 DC395x_UNLOCK_IO(acb->scsi_host, flags);
1797}
1798
1799
1800static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
1801{
1802 struct AdapterCtlBlk *acb = dev_id;
1803 u16 scsi_status;
1804 u8 dma_status;
1805 irqreturn_t handled = IRQ_NONE;
1806
1807
1808
1809
1810 scsi_status = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
1811 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
1812 if (scsi_status & SCSIINTERRUPT) {
1813
1814 dc395x_handle_interrupt(acb, scsi_status);
1815 handled = IRQ_HANDLED;
1816 }
1817 else if (dma_status & 0x20) {
1818
1819 dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status);
1820#if 0
1821 dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n");
1822 if (acb->active_dcb) {
1823 acb->active_dcb-> flag |= ABORT_DEV_;
1824 if (acb->active_dcb->active_srb)
1825 enable_msgout_abort(acb, acb->active_dcb->active_srb);
1826 }
1827 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO);
1828#else
1829 dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n");
1830 acb = NULL;
1831#endif
1832 handled = IRQ_HANDLED;
1833 }
1834
1835 return handled;
1836}
1837
1838
1839static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1840 u16 *pscsi_status)
1841{
1842 dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd);
1843 if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
1844 *pscsi_status = PH_BUS_FREE;
1845
1846 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1847 srb->state &= ~SRB_MSGOUT;
1848}
1849
1850
1851static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1852 u16 *pscsi_status)
1853{
1854 u16 i;
1855 u8 *ptr;
1856 dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd);
1857
1858 clear_fifo(acb, "msgout_phase1");
1859 if (!(srb->state & SRB_MSGOUT)) {
1860 srb->state |= SRB_MSGOUT;
1861 dprintkl(KERN_DEBUG,
1862 "msgout_phase1: (0x%p) Phase unexpected\n",
1863 srb->cmd);
1864 }
1865 if (!srb->msg_count) {
1866 dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n",
1867 srb->cmd);
1868 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP);
1869 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1870 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
1871 return;
1872 }
1873 ptr = (u8 *)srb->msgout_buf;
1874 for (i = 0; i < srb->msg_count; i++)
1875 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
1876 srb->msg_count = 0;
1877 if (srb->msgout_buf[0] == MSG_ABORT)
1878 srb->state = SRB_ABORT_SENT;
1879
1880 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
1881}
1882
1883
1884static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1885 u16 *pscsi_status)
1886{
1887 dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd);
1888 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1889}
1890
1891
1892static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1893 u16 *pscsi_status)
1894{
1895 struct DeviceCtlBlk *dcb;
1896 u8 *ptr;
1897 u16 i;
1898 dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd);
1899
1900 clear_fifo(acb, "command_phase1");
1901 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN);
1902 if (!(srb->flag & AUTO_REQSENSE)) {
1903 ptr = (u8 *)srb->cmd->cmnd;
1904 for (i = 0; i < srb->cmd->cmd_len; i++) {
1905 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr);
1906 ptr++;
1907 }
1908 } else {
1909 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
1910 dcb = acb->active_dcb;
1911
1912 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
1913 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1914 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1915 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
1916 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1917 }
1918 srb->state |= SRB_COMMAND;
1919
1920 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1921
1922 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
1923}
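
/*
 * Debug-only consistency check: the lengths of the remaining scatter-gather
 * entries must add up to total_xfer_length.
 */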
1930static void sg_verify_length(struct ScsiReqBlk *srb)
1931{
1932 if (debug_enabled(DBG_SG)) {
1933 unsigned len = 0;
1934 unsigned idx = srb->sg_index;
1935 struct SGentry *psge = srb->segment_x + idx;
1936 for (; idx < srb->sg_count; psge++, idx++)
1937 len += psge->length;
1938 if (len != srb->total_xfer_length)
			dprintkdbg(DBG_SG,
			       "Inconsistent SRB S/G lengths (Tot=%zu, Count=%i) !!\n",
			       srb->total_xfer_length, len);
1942 }
1943}
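
/*
 * Adjust the scatter-gather shadow list after a partial transfer so that
 * 'left' bytes remain: skip fully transferred entries, trim the current
 * one and sync the list back to the device.
 */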
1950static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
1951{
1952 u8 idx;
1953 u32 xferred = srb->total_xfer_length - left;
1954 struct SGentry *psge = srb->segment_x + srb->sg_index;
1955
	dprintkdbg(DBG_0,
		"sg_update_list: Transferred %i of %zu bytes, %i remain\n",
		xferred, srb->total_xfer_length, left);
1959 if (xferred == 0) {
1960
1961 return;
1962 }
1963
1964 sg_verify_length(srb);
1965 srb->total_xfer_length = left;
1966 for (idx = srb->sg_index; idx < srb->sg_count; idx++) {
1967 if (xferred >= psge->length) {
1968
1969 xferred -= psge->length;
1970 } else {
1971
1972 psge->length -= xferred;
1973 psge->address += xferred;
1974 srb->sg_index = idx;
1975 pci_dma_sync_single_for_device(srb->dcb->
1976 acb->dev,
1977 srb->sg_bus_addr,
1978 SEGMENTX_LEN,
1979 PCI_DMA_TODEVICE);
1980 break;
1981 }
1982 psge++;
1983 }
1984 sg_verify_length(srb);
1985}
1986
1987
1988
1989
1990
1991
1992
1993
1994static void sg_subtract_one(struct ScsiReqBlk *srb)
1995{
1996 sg_update_list(srb, srb->total_xfer_length - 1);
1997}
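
/*
 * After a data phase ends, flush whichever of the SCSI and DMA FIFOs still
 * holds data; the order depends on the direction of the last DMA command.
 */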
2008static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
2009 struct ScsiReqBlk *srb)
2010{
2011
2012 if (DC395x_read16(acb, TRM_S1040_DMA_COMMAND) & 0x0001) {
2013 if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
2014 clear_fifo(acb, "cleanup/in");
2015 if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
2016 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
2017 } else {
2018 if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
2019 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
2020 if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
2021 clear_fifo(acb, "cleanup/out");
2022 }
2023 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2024}
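
/*
 * Transfers of up to this many bytes are done by PIO through the SCSI FIFO
 * instead of being set up as DMA transfers.
 */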
2031#define DC395x_LASTPIO 4
2032
2033
2034static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2035 u16 *pscsi_status)
2036{
2037 struct DeviceCtlBlk *dcb = srb->dcb;
2038 u16 scsi_status = *pscsi_status;
2039 u32 d_left_counter = 0;
2040 dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n",
2041 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2055 dprintkdbg(DBG_PIO, "data_out_phase0: "
2056 "DMA{fifocnt=0x%02x fifostat=0x%02x} "
2057 "SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n",
2058 DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
2059 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
2060 DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
2061 DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status,
2062 srb->total_xfer_length);
2063 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO);
2064
2065 if (!(srb->state & SRB_XFERPAD)) {
2066 if (scsi_status & PARITYERROR)
2067 srb->status |= PARITY_ERROR;
2068
2069
2070
2071
2072
2073
2074
2075 if (!(scsi_status & SCSIXFERDONE)) {
2076
2077
2078
2079
2080 d_left_counter =
2081 (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
2082 0x1F);
2083 if (dcb->sync_period & WIDE_SYNC)
2084 d_left_counter <<= 1;
2085
2086 dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n"
2087 "SCSI{fifocnt=0x%02x cnt=0x%08x} "
2088 "DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n",
2089 DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
2090 (dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
2091 DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
2092 DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
2093 DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
2094 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
2095 DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
2096 }
2097
2098
2099
2100
2101
2102
2103
2104
2105
2106 if (srb->total_xfer_length > DC395x_LASTPIO)
2107 d_left_counter +=
2108 DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
2109
2110
2111
2112
2113 if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
2114 && scsi_bufflen(srb->cmd) % 2) {
2115 d_left_counter = 0;
2116 dprintkl(KERN_INFO,
2117 "data_out_phase0: Discard 1 byte (0x%02x)\n",
2118 scsi_status);
2119 }
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130 if (d_left_counter == 0) {
2131 srb->total_xfer_length = 0;
2132 } else {
2133
2134
2135
2136
2137
2138 long oldxferred =
2139 srb->total_xfer_length - d_left_counter;
2140 const int diff =
2141 (dcb->sync_period & WIDE_SYNC) ? 2 : 1;
2142 sg_update_list(srb, d_left_counter);
2143
2144 if ((srb->segment_x[srb->sg_index].length ==
2145 diff && scsi_sg_count(srb->cmd))
2146 || ((oldxferred & ~PAGE_MASK) ==
2147 (PAGE_SIZE - diff))
2148 ) {
2149 dprintkl(KERN_INFO, "data_out_phase0: "
2150 "Work around chip bug (%i)?\n", diff);
2151 d_left_counter =
2152 srb->total_xfer_length - diff;
2153 sg_update_list(srb, d_left_counter);
2154
2155
2156
2157
2158 }
2159 }
2160 }
2161 if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) {
2162 cleanup_after_transfer(acb, srb);
2163 }
2164}
2165
2166
2167static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2168 u16 *pscsi_status)
2169{
2170 dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n",
2171 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2172 clear_fifo(acb, "data_out_phase1");
2173
2174 data_io_transfer(acb, srb, XFERDATAOUT);
2175}
2176
2177static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2178 u16 *pscsi_status)
2179{
2180 u16 scsi_status = *pscsi_status;
2181
2182 dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n",
2183 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198 if (!(srb->state & SRB_XFERPAD)) {
2199 u32 d_left_counter;
2200 unsigned int sc, fc;
2201
2202 if (scsi_status & PARITYERROR) {
2203 dprintkl(KERN_INFO, "data_in_phase0: (0x%p) "
2204 "Parity Error\n", srb->cmd);
2205 srb->status |= PARITY_ERROR;
2206 }
2207
2208
2209
2210
2211
2212
2213 if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) {
2214#if 0
2215 int ctr = 6000000;
2216 dprintkl(KERN_DEBUG,
2217 "DIP0: Wait for DMA FIFO to flush ...\n");
2218
2219
2220
2221 while (!
2222 (DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) &
2223 0x80) && --ctr);
			if (ctr < 6000000 - 1)
				dprintkl(KERN_DEBUG,
					"DIP0: Had to wait for DMA ...\n");
2227 if (!ctr)
2228 dprintkl(KERN_ERR,
2229 "Deadlock in DIP0 waiting for DMA FIFO empty!!\n");
2230
2231#endif
2232 dprintkdbg(DBG_KG, "data_in_phase0: "
2233 "DMA{fifocnt=0x%02x fifostat=0x%02x}\n",
2234 DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
2235 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT));
2236 }
2237
2238 sc = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
2239 fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
2240 d_left_counter = sc + ((fc & 0x1f)
2241 << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
2242 0));
2243 dprintkdbg(DBG_KG, "data_in_phase0: "
2244 "SCSI{fifocnt=0x%02x%s ctr=0x%08x} "
2245 "DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} "
2246 "Remain{totxfer=%i scsi_fifo+ctr=%i}\n",
2247 fc,
2248 (srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
2249 sc,
2250 fc,
2251 DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
2252 DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
2253 srb->total_xfer_length, d_left_counter);
2254#if DC395x_LASTPIO
2255
2256 if (d_left_counter
2257 && srb->total_xfer_length <= DC395x_LASTPIO) {
2258 size_t left_io = srb->total_xfer_length;
2259
2260
2261
2262 dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) "
2263 "for remaining %i bytes:",
2264 fc & 0x1f,
2265 (srb->dcb->sync_period & WIDE_SYNC) ?
2266 "words" : "bytes",
2267 srb->total_xfer_length);
2268 if (srb->dcb->sync_period & WIDE_SYNC)
2269 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
2270 CFG2_WIDEFIFO);
2271 while (left_io) {
2272 unsigned char *virt, *base = NULL;
2273 unsigned long flags = 0;
2274 size_t len = left_io;
2275 size_t offset = srb->request_length - left_io;
2276
2277 local_irq_save(flags);
2278
2279
2280 base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
2281 srb->sg_count, &offset, &len);
2282 virt = base + offset;
2283
2284 left_io -= len;
2285
2286 while (len) {
2287 u8 byte;
2288 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2289 *virt++ = byte;
2290
2291 if (debug_enabled(DBG_PIO))
2292 printk(" %02x", byte);
2293
2294 d_left_counter--;
2295 sg_subtract_one(srb);
2296
2297 len--;
2298
2299 fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
2300
2301 if (fc == 0x40) {
2302 left_io = 0;
2303 break;
2304 }
2305 }
2306
2307 WARN_ON((fc != 0x40) == !d_left_counter);
2308
2309 if (fc == 0x40 && (srb->dcb->sync_period & WIDE_SYNC)) {
2310
2311 if (srb->total_xfer_length > 0) {
2312 u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2313
2314 *virt++ = byte;
2315 srb->total_xfer_length--;
2316 if (debug_enabled(DBG_PIO))
2317 printk(" %02x", byte);
2318 }
2319
2320 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
2321 }
2322
2323 scsi_kunmap_atomic_sg(base);
2324 local_irq_restore(flags);
2325 }
2326
2327
2328 if (debug_enabled(DBG_PIO))
2329 printk("\n");
2330 }
2331#endif
2332
2333#if 0
2334
2335
2336
2337
2338 if (!(scsi_status & SCSIXFERDONE)) {
2339
2340
2341
2342
2343 d_left_counter =
2344 (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
2345 0x1F);
2346 if (srb->dcb->sync_period & WIDE_SYNC)
2347 d_left_counter <<= 1;
2348
2349
2350
2351
2352
2353 }
2354#endif
2355
2356 if (d_left_counter == 0
2357 || (scsi_status & SCSIXFERCNT_2_ZERO)) {
2358#if 0
2359 int ctr = 6000000;
2360 u8 TempDMAstatus;
2361 do {
2362 TempDMAstatus =
2363 DC395x_read8(acb, TRM_S1040_DMA_STATUS);
2364 } while (!(TempDMAstatus & DMAXFERCOMP) && --ctr);
2365 if (!ctr)
2366 dprintkl(KERN_ERR,
2367 "Deadlock in DataInPhase0 waiting for DMA!!\n");
2368 srb->total_xfer_length = 0;
2369#endif
2370 srb->total_xfer_length = d_left_counter;
2371 } else {
2372
2373
2374
2375
2376
2377
2378
2379
2380 sg_update_list(srb, d_left_counter);
2381 }
2382 }
2383
2384 if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) {
2385 cleanup_after_transfer(acb, srb);
2386 }
2387}
2388
2389
2390static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2391 u16 *pscsi_status)
2392{
2393 dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n",
2394 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2395 data_io_transfer(acb, srb, XFERDATAIN);
2396}
2397
2398
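/*
 * Start the next chunk of a data transfer.  Transfers larger than
 * DC395x_LASTPIO are handed to the DMA engine (using the scatter-gather
 * list when the command has one); smaller remainders go through the SCSI
 * FIFO by PIO; a request with nothing left to transfer is padded with
 * dummy bytes so the data phase can complete.
 */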
2399static void data_io_transfer(struct AdapterCtlBlk *acb,
2400 struct ScsiReqBlk *srb, u16 io_dir)
2401{
2402 struct DeviceCtlBlk *dcb = srb->dcb;
2403 u8 bval;
2404 dprintkdbg(DBG_0,
2405 "data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
2406 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
2407 ((io_dir & DMACMD_DIR) ? 'r' : 'w'),
2408 srb->total_xfer_length, srb->sg_index, srb->sg_count);
2409 if (srb == acb->tmp_srb)
2410 dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n");
2411 if (srb->sg_index >= srb->sg_count) {
2412
2413 return;
2414 }
2415
2416 if (srb->total_xfer_length > DC395x_LASTPIO) {
2417 u8 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
2418
2419
2420
2421
2422 if (dma_status & XFERPENDING) {
2423 dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! "
2424 "Expect trouble!\n");
2425 dump_register_info(acb, dcb, srb);
2426 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
2427 }
2428
2429
2430
2431
2432
2433 srb->state |= SRB_DATA_XFER;
2434 DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0);
2435 if (scsi_sg_count(srb->cmd)) {
2436 io_dir |= DMACMD_SG;
2437 DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
2438 srb->sg_bus_addr +
2439 sizeof(struct SGentry) *
2440 srb->sg_index);
2441
2442 DC395x_write32(acb, TRM_S1040_DMA_XCNT,
2443 ((u32)(srb->sg_count -
2444 srb->sg_index) << 3));
2445 } else {
2446 io_dir &= ~DMACMD_SG;
2447 DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
2448 srb->segment_x[0].address);
2449 DC395x_write32(acb, TRM_S1040_DMA_XCNT,
2450 srb->segment_x[0].length);
2451 }
2452
2453 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
2454 srb->total_xfer_length);
2455 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2456 if (io_dir & DMACMD_DIR) {
2457 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
2458 SCMD_DMA_IN);
2459 DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
2460 } else {
2461 DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
2462 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
2463 SCMD_DMA_OUT);
2464 }
2465
2466 }
2467#if DC395x_LASTPIO
2468 else if (srb->total_xfer_length > 0) {
2469
2470
2471
2472
2473 srb->state |= SRB_DATA_XFER;
2474
2475 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
2476 srb->total_xfer_length);
2477 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2478 if (io_dir & DMACMD_DIR) {
2479 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
2480 SCMD_FIFO_IN);
2481 } else {
2482 int ln = srb->total_xfer_length;
2483 size_t left_io = srb->total_xfer_length;
2484
2485 if (srb->dcb->sync_period & WIDE_SYNC)
2486 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
2487 CFG2_WIDEFIFO);
2488
2489 while (left_io) {
2490 unsigned char *virt, *base = NULL;
2491 unsigned long flags = 0;
2492 size_t len = left_io;
2493 size_t offset = srb->request_length - left_io;
2494
2495 local_irq_save(flags);
2496
2497 base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
2498 srb->sg_count, &offset, &len);
2499 virt = base + offset;
2500
2501 left_io -= len;
2502
2503 while (len--) {
2504 if (debug_enabled(DBG_PIO))
2505 printk(" %02x", *virt);
2506
2507 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *virt++);
2508
2509 sg_subtract_one(srb);
2510 }
2511
2512 scsi_kunmap_atomic_sg(base);
2513 local_irq_restore(flags);
2514 }
2515 if (srb->dcb->sync_period & WIDE_SYNC) {
2516 if (ln % 2) {
2517 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
2518 if (debug_enabled(DBG_PIO))
2519 printk(" |00");
2520 }
2521 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
2522 }
2523
2524 if (debug_enabled(DBG_PIO))
2525 printk("\n");
2526 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
2527 SCMD_FIFO_OUT);
2528 }
2529 }
2530#endif
2531 else {
2532 u8 data = 0, data2 = 0;
2533 if (srb->sg_count) {
2534 srb->adapter_status = H_OVER_UNDER_RUN;
2535 srb->status |= OVER_RUN;
2536 }
2537
2538
2539
2540
2541
2542 if (dcb->sync_period & WIDE_SYNC) {
2543 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 2);
2544 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
2545 CFG2_WIDEFIFO);
2546 if (io_dir & DMACMD_DIR) {
2547 data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2548 data2 = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2549 } else {
2550
2551
2552
2553 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
2554 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'G');
2555 }
2556 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
2557 } else {
2558 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
2559
2560
2561 if (io_dir & DMACMD_DIR)
2562 data = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2563 else
2564 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
2565 }
2566 srb->state |= SRB_XFERPAD;
2567 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2568
2569 bval = (io_dir & DMACMD_DIR) ? SCMD_FIFO_IN : SCMD_FIFO_OUT;
2570 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, bval);
2571 }
2572}
2573
2574
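/* Status phase, part 0: read the target status byte and the following
 * message byte from the FIFO, mark the request completed and accept the
 * message. */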
2575static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2576 u16 *pscsi_status)
2577{
2578 dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n",
2579 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2580 srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2581 srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2582 srb->state = SRB_COMPLETED;
2583 *pscsi_status = PH_BUS_FREE;
2584 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2585 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
2586}
2587
2588
2589static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2590 u16 *pscsi_status)
2591{
2592 dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n",
2593 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
2594 srb->state = SRB_STATUS;
2595 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2596 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
2597}
2598
2599
2600
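/* Return non-zero once the collected bytes form a complete message:
 * extended messages carry their length in byte 1, codes 0x20-0x2f are
 * two-byte messages, everything else is a single byte. */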
2601static inline u8 msgin_completed(u8 * msgbuf, u32 len)
2602{
2603 if (*msgbuf == EXTENDED_MESSAGE) {
2604 if (len < 2)
2605 return 0;
2606 if (len < msgbuf[1] + 2)
2607 return 0;
2608 } else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f)
2609 if (len < 2)
2610 return 0;
2611 return 1;
2612}
2613
2614
2615static inline void msgin_reject(struct AdapterCtlBlk *acb,
2616 struct ScsiReqBlk *srb)
2617{
2618 srb->msgout_buf[0] = MESSAGE_REJECT;
2619 srb->msg_count = 1;
2620 DC395x_ENABLE_MSGOUT;
2621 srb->state &= ~SRB_MSGIN;
2622 srb->state |= SRB_MSGOUT;
2623 dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n",
2624 srb->msgin_buf[0],
2625 srb->dcb->target_id, srb->dcb->target_lun);
2626}
2627
2628
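/* Reselection with a queue tag: find the disconnected request that owns
 * the tag on this device.  If no matching SRB is found, answer with
 * ABORT TAG using the adapter's temporary SRB. */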
2629static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
2630 struct DeviceCtlBlk *dcb, u8 tag)
2631{
2632 struct ScsiReqBlk *srb = NULL;
2633 struct ScsiReqBlk *i;
	dprintkdbg(DBG_0, "msgin_qtag: tag=%i\n", tag);
2636
2637 if (!(dcb->tag_mask & (1 << tag)))
2638 dprintkl(KERN_DEBUG,
2639 "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n",
2640 dcb->tag_mask, tag);
2641
2642 if (list_empty(&dcb->srb_going_list))
2643 goto mingx0;
2644 list_for_each_entry(i, &dcb->srb_going_list, list) {
2645 if (i->tag_number == tag) {
2646 srb = i;
2647 break;
2648 }
2649 }
2650 if (!srb)
2651 goto mingx0;
2652
2653 dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n",
2654 srb->cmd, srb->dcb->target_id, srb->dcb->target_lun);
2655 if (dcb->flag & ABORT_DEV_) {
2656
2657 enable_msgout_abort(acb, srb);
2658 }
2659
2660 if (!(srb->state & SRB_DISCONNECT))
2661 goto mingx0;
2662
2663 memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len);
2664 srb->state |= dcb->active_srb->state;
2665 srb->state |= SRB_DATA_XFER;
2666 dcb->active_srb = srb;
2667
2668 return srb;
2669
2670 mingx0:
2671 srb = acb->tmp_srb;
2672 srb->state = SRB_UNEXPECT_RESEL;
2673 dcb->active_srb = srb;
2674 srb->msgout_buf[0] = MSG_ABORT_TAG;
2675 srb->msg_count = 1;
2676 DC395x_ENABLE_MSGOUT;
2677 dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag);
2678 return srb;
2679}
2680
2681
2682static inline void reprogram_regs(struct AdapterCtlBlk *acb,
2683 struct DeviceCtlBlk *dcb)
2684{
2685 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
2686 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
2687 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
2688 set_xfer_rate(acb, dcb);
2689}
2690
2691
2692
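/* Sync negotiation was rejected: fall back to asynchronous transfers and,
 * if wide negotiation is still outstanding, try WDTR next. */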
2693static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
2694{
2695 struct DeviceCtlBlk *dcb = srb->dcb;
2696 dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n",
2697 dcb->target_id, dcb->target_lun);
2698
2699 dcb->sync_mode &= ~(SYNC_NEGO_ENABLE);
2700 dcb->sync_mode |= SYNC_NEGO_DONE;
2701
2702 dcb->sync_offset = 0;
2703 dcb->min_nego_period = 200 >> 2;
2704 srb->state &= ~SRB_DO_SYNC_NEGO;
2705 reprogram_regs(acb, dcb);
2706 if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
2707 && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
2708 build_wdtr(acb, dcb, srb);
2709 DC395x_ENABLE_MSGOUT;
2710 dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n");
2711 }
2712}
2713
2714
2715
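/* Evaluate an incoming SDTR: clip the offset, round the period up to the
 * next supported clock divisor, reprogram the chip and, if the target
 * initiated the negotiation, echo the agreed values back. */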
2716static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
2717{
2718 struct DeviceCtlBlk *dcb = srb->dcb;
2719 u8 bval;
2720 int fact;
2721 dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins "
2722 "(%02i.%01i MHz) Offset %i\n",
2723 dcb->target_id, srb->msgin_buf[3] << 2,
2724 (250 / srb->msgin_buf[3]),
2725 ((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3],
2726 srb->msgin_buf[4]);
2727
2728 if (srb->msgin_buf[4] > 15)
2729 srb->msgin_buf[4] = 15;
2730 if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO))
2731 dcb->sync_offset = 0;
2732 else if (dcb->sync_offset == 0)
2733 dcb->sync_offset = srb->msgin_buf[4];
2734 if (srb->msgin_buf[4] > dcb->sync_offset)
2735 srb->msgin_buf[4] = dcb->sync_offset;
2736 else
2737 dcb->sync_offset = srb->msgin_buf[4];
2738 bval = 0;
2739 while (bval < 7 && (srb->msgin_buf[3] > clock_period[bval]
2740 || dcb->min_nego_period >
2741 clock_period[bval]))
2742 bval++;
2743 if (srb->msgin_buf[3] < clock_period[bval])
2744 dprintkl(KERN_INFO,
2745 "msgin_set_sync: Increase sync nego period to %ins\n",
2746 clock_period[bval] << 2);
2747 srb->msgin_buf[3] = clock_period[bval];
2748 dcb->sync_period &= 0xf0;
2749 dcb->sync_period |= ALT_SYNC | bval;
2750 dcb->min_nego_period = srb->msgin_buf[3];
2751
2752 if (dcb->sync_period & WIDE_SYNC)
2753 fact = 500;
2754 else
2755 fact = 250;
2756
2757 dprintkl(KERN_INFO,
2758 "Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n",
2759 dcb->target_id, (fact == 500) ? "Wide16" : "",
2760 dcb->min_nego_period << 2, dcb->sync_offset,
2761 (fact / dcb->min_nego_period),
2762 ((fact % dcb->min_nego_period) * 10 +
2763 dcb->min_nego_period / 2) / dcb->min_nego_period);
2764
2765 if (!(srb->state & SRB_DO_SYNC_NEGO)) {
2766
2767 dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n",
2768 srb->msgin_buf[3] << 2, srb->msgin_buf[4]);
2769
2770 memcpy(srb->msgout_buf, srb->msgin_buf, 5);
2771 srb->msg_count = 5;
2772 DC395x_ENABLE_MSGOUT;
2773 dcb->sync_mode |= SYNC_NEGO_DONE;
2774 } else {
2775 if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
2776 && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
2777 build_wdtr(acb, dcb, srb);
2778 DC395x_ENABLE_MSGOUT;
2779 dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n");
2780 }
2781 }
2782 srb->state &= ~SRB_DO_SYNC_NEGO;
2783 dcb->sync_mode |= SYNC_NEGO_DONE | SYNC_NEGO_ENABLE;
2784
2785 reprogram_regs(acb, dcb);
2786}
2787
2788
2789static inline void msgin_set_nowide(struct AdapterCtlBlk *acb,
2790 struct ScsiReqBlk *srb)
2791{
2792 struct DeviceCtlBlk *dcb = srb->dcb;
2793 dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id);
2794
2795 dcb->sync_period &= ~WIDE_SYNC;
2796 dcb->sync_mode &= ~(WIDE_NEGO_ENABLE);
2797 dcb->sync_mode |= WIDE_NEGO_DONE;
2798 srb->state &= ~SRB_DO_WIDE_NEGO;
2799 reprogram_regs(acb, dcb);
2800 if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
2801 && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
2802 build_sdtr(acb, dcb, srb);
2803 DC395x_ENABLE_MSGOUT;
2804 dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n");
2805 }
2806}
2807
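/* Evaluate an incoming WDTR: limit the width to what the adapter and the
 * device configuration allow, answer if the target started the exchange,
 * then optionally follow up with SDTR. */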
2808static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
2809{
2810 struct DeviceCtlBlk *dcb = srb->dcb;
2811 u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO
2812 && acb->config & HCC_WIDE_CARD) ? 1 : 0;
2813 dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id);
2814
2815 if (srb->msgin_buf[3] > wide)
2816 srb->msgin_buf[3] = wide;
2817
2818 if (!(srb->state & SRB_DO_WIDE_NEGO)) {
2819 dprintkl(KERN_DEBUG,
2820 "msgin_set_wide: Wide nego initiated <%02i>\n",
2821 dcb->target_id);
2822 memcpy(srb->msgout_buf, srb->msgin_buf, 4);
2823 srb->msg_count = 4;
2824 srb->state |= SRB_DO_WIDE_NEGO;
2825 DC395x_ENABLE_MSGOUT;
2826 }
2827
2828 dcb->sync_mode |= (WIDE_NEGO_ENABLE | WIDE_NEGO_DONE);
2829 if (srb->msgin_buf[3] > 0)
2830 dcb->sync_period |= WIDE_SYNC;
2831 else
2832 dcb->sync_period &= ~WIDE_SYNC;
2833 srb->state &= ~SRB_DO_WIDE_NEGO;
2834
2835 dprintkdbg(DBG_1,
2836 "msgin_set_wide: Wide (%i bit) negotiated <%02i>\n",
2837 (8 << srb->msgin_buf[3]), dcb->target_id);
2838 reprogram_regs(acb, dcb);
2839 if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
2840 && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
2841 build_sdtr(acb, dcb, srb);
2842 DC395x_ENABLE_MSGOUT;
2843 dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n");
2844 }
2845}
2846
2847
2848
2849
2850
2851
2852
2853
2854
2855
2856
2857
2858
2859
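/*
 * Message-in phase, part 0: collect message bytes from the FIFO and act on
 * the completed message (disconnect, queue tags, message reject handling,
 * SDTR/WDTR, save/restore pointers, abort).  Extended messages other than
 * SDTR and WDTR are answered with MESSAGE REJECT.
 */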
2860static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2861 u16 *pscsi_status)
2862{
2863 struct DeviceCtlBlk *dcb = acb->active_dcb;
2864 dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd);
2865
2866 srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
2867 if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
2868
2869 switch (srb->msgin_buf[0]) {
2870 case DISCONNECT:
2871 srb->state = SRB_DISCONNECT;
2872 break;
2873
2874 case SIMPLE_QUEUE_TAG:
2875 case HEAD_OF_QUEUE_TAG:
2876 case ORDERED_QUEUE_TAG:
			srb = msgin_qtag(acb, dcb, srb->msgin_buf[1]);
2880 break;
2881
2882 case MESSAGE_REJECT:
2883 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
2884 DO_CLRATN | DO_DATALATCH);
2885
2886 if (srb->state & SRB_DO_SYNC_NEGO) {
2887 msgin_set_async(acb, srb);
2888 break;
2889 }
2890
2891 if (srb->state & SRB_DO_WIDE_NEGO) {
2892 msgin_set_nowide(acb, srb);
2893 break;
2894 }
2895 enable_msgout_abort(acb, srb);
2896
2897 break;
2898
2899 case EXTENDED_MESSAGE:
2900
2901 if (srb->msgin_buf[1] == 3
2902 && srb->msgin_buf[2] == EXTENDED_SDTR) {
2903 msgin_set_sync(acb, srb);
2904 break;
2905 }
2906
2907 if (srb->msgin_buf[1] == 2
2908 && srb->msgin_buf[2] == EXTENDED_WDTR
2909 && srb->msgin_buf[3] <= 2) {
2910 msgin_set_wide(acb, srb);
2911 break;
2912 }
2913 msgin_reject(acb, srb);
2914 break;
2915
2916 case MSG_IGNOREWIDE:
2917
2918 dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n");
2919 break;
2920
2921 case COMMAND_COMPLETE:
2922
2923 break;
2924
2925 case SAVE_POINTERS:
2926
2927
2928
2929
2930 dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
2931 "SAVE POINTER rem=%i Ignore\n",
2932 srb->cmd, srb->total_xfer_length);
2933 break;
2934
2935 case RESTORE_POINTERS:
2936 dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. Ignore\n");
2937 break;
2938
2939 case ABORT:
2940 dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
2941 "<%02i-%i> ABORT msg\n",
2942 srb->cmd, dcb->target_id,
2943 dcb->target_lun);
2944 dcb->flag |= ABORT_DEV_;
2945 enable_msgout_abort(acb, srb);
2946 break;
2947
2948 default:
2949
2950 if (srb->msgin_buf[0] & IDENTIFY_BASE) {
2951 dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n");
2952 srb->msg_count = 1;
2953 srb->msgout_buf[0] = dcb->identify_msg;
2954 DC395x_ENABLE_MSGOUT;
2955 srb->state |= SRB_MSGOUT;
2956
2957 }
2958 msgin_reject(acb, srb);
2959 }
2960
2961
2962 srb->state &= ~SRB_MSGIN;
2963 acb->msg_len = 0;
2964 }
2965 *pscsi_status = PH_BUS_FREE;
2966 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2967 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
2968}
2969
2970
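/* Message-in phase, part 1: clear the FIFO and set up reception of the
 * next message byte. */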
2971static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2972 u16 *pscsi_status)
2973{
2974 dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd);
2975 clear_fifo(acb, "msgin_phase1");
2976 DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
2977 if (!(srb->state & SRB_MSGIN)) {
2978 srb->state &= ~SRB_DISCONNECT;
2979 srb->state |= SRB_MSGIN;
2980 }
2981 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
2982
2983 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_IN);
2984}
2985
2986
2987static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2988 u16 *pscsi_status)
2989{
2990}
2991
2992
2993static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
2994 u16 *pscsi_status)
2995{
2996}
2997
2998
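/* Copy the negotiated transfer parameters to every DCB sharing the same
 * target ID (other LUNs of the same device); only done from LUN 0. */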
2999static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb)
3000{
3001 struct DeviceCtlBlk *i;
3002
3003
3004 if (dcb->identify_msg & 0x07)
3005 return;
3006
3007 if (acb->scan_devices) {
3008 current_sync_offset = dcb->sync_offset;
3009 return;
3010 }
3011
3012 list_for_each_entry(i, &acb->dcb_list, list)
3013 if (i->target_id == dcb->target_id) {
3014 i->sync_period = dcb->sync_period;
3015 i->sync_offset = dcb->sync_offset;
3016 i->sync_mode = dcb->sync_mode;
3017 i->min_nego_period = dcb->min_nego_period;
3018 }
3019}
3020
3021
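/* The target released the bus.  Depending on the SRB state this is either
 * a clean disconnect, a completed command, a sent abort or a selection
 * timeout; retry, requeue or finish the request accordingly. */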
3022static void disconnect(struct AdapterCtlBlk *acb)
3023{
3024 struct DeviceCtlBlk *dcb = acb->active_dcb;
3025 struct ScsiReqBlk *srb;
3026
3027 if (!dcb) {
3028 dprintkl(KERN_ERR, "disconnect: No such device\n");
3029 udelay(500);
3030
3031 acb->last_reset =
3032 jiffies + HZ / 2 +
3033 HZ * acb->eeprom.delay_time;
3034 clear_fifo(acb, "disconnectEx");
3035 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
3036 return;
3037 }
3038 srb = dcb->active_srb;
3039 acb->active_dcb = NULL;
3040 dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd);
3041
3042 srb->scsi_phase = PH_BUS_FREE;
3043 clear_fifo(acb, "disconnect");
3044 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
3045 if (srb->state & SRB_UNEXPECT_RESEL) {
3046 dprintkl(KERN_ERR,
3047 "disconnect: Unexpected reselection <%02i-%i>\n",
3048 dcb->target_id, dcb->target_lun);
3049 srb->state = 0;
3050 waiting_process_next(acb);
3051 } else if (srb->state & SRB_ABORT_SENT) {
3052 dcb->flag &= ~ABORT_DEV_;
3053 acb->last_reset = jiffies + HZ / 2 + 1;
3054 dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
3055 doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
3056 waiting_process_next(acb);
3057 } else {
		if ((srb->state & (SRB_START_ | SRB_MSGOUT))
		    || !(srb->state & (SRB_DISCONNECT | SRB_COMPLETED))) {
3061
3062
3063
3064
3065
3066 if (srb->state != SRB_START_
3067 && srb->state != SRB_MSGOUT) {
3068 srb->state = SRB_READY;
3069 dprintkl(KERN_DEBUG,
3070 "disconnect: (0x%p) Unexpected\n",
3071 srb->cmd);
3072 srb->target_status = SCSI_STAT_SEL_TIMEOUT;
3073 goto disc1;
3074 } else {
3075
3076 dprintkdbg(DBG_KG, "disconnect: (0x%p) "
3077 "<%02i-%i> SelTO\n", srb->cmd,
3078 dcb->target_id, dcb->target_lun);
3079 if (srb->retry_count++ > DC395x_MAX_RETRIES
3080 || acb->scan_devices) {
3081 srb->target_status =
3082 SCSI_STAT_SEL_TIMEOUT;
3083 goto disc1;
3084 }
3085 free_tag(dcb, srb);
3086 srb_going_to_waiting_move(dcb, srb);
3087 dprintkdbg(DBG_KG,
3088 "disconnect: (0x%p) Retry\n",
3089 srb->cmd);
3090 waiting_set_timer(acb, HZ / 20);
3091 }
3092 } else if (srb->state & SRB_DISCONNECT) {
3093 u8 bval = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
3094
3095
3096
3097 if (bval & 0x40) {
3098 dprintkdbg(DBG_0, "disconnect: SCSI bus stat "
3099 " 0x%02x: ACK set! Other controllers?\n",
3100 bval);
3101
3102 } else
3103 waiting_process_next(acb);
3104 } else if (srb->state & SRB_COMPLETED) {
3105 disc1:
3106
3107
3108
3109 free_tag(dcb, srb);
3110 dcb->active_srb = NULL;
3111 srb->state = SRB_FREE;
3112 srb_done(acb, dcb, srb);
3113 }
3114 }
3115}
3116
3117
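/* A target reselected us.  If we lost arbitration, requeue the command
 * that lost out; then look up the reselecting device, pick its
 * disconnected SRB (or the temporary SRB for tagged or unexpected
 * reselection) and reprogram the chip for that device. */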
3118static void reselect(struct AdapterCtlBlk *acb)
3119{
3120 struct DeviceCtlBlk *dcb = acb->active_dcb;
3121 struct ScsiReqBlk *srb = NULL;
3122 u16 rsel_tar_lun_id;
3123 u8 id, lun;
3124 u8 arblostflag = 0;
3125 dprintkdbg(DBG_0, "reselect: acb=%p\n", acb);
3126
3127 clear_fifo(acb, "reselect");
3128
3129
3130 rsel_tar_lun_id = DC395x_read16(acb, TRM_S1040_SCSI_TARGETID);
3131 if (dcb) {
3132 srb = dcb->active_srb;
3133 if (!srb) {
3134 dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, "
3135 "but active_srb == NULL\n");
3136 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
3137 return;
3138 }
3139
3140 if (!acb->scan_devices) {
3141 dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> "
3142 "Arb lost but Resel win rsel=%i stat=0x%04x\n",
3143 srb->cmd, dcb->target_id,
3144 dcb->target_lun, rsel_tar_lun_id,
3145 DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
3146 arblostflag = 1;
3147
3148
3149 srb->state = SRB_READY;
3150 free_tag(dcb, srb);
3151 srb_going_to_waiting_move(dcb, srb);
3152 waiting_set_timer(acb, HZ / 20);
3153
3154
3155 }
3156 }
3157
3158 if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8)))
3159 dprintkl(KERN_DEBUG, "reselect: Expects identify msg. "
3160 "Got %i!\n", rsel_tar_lun_id);
3161 id = rsel_tar_lun_id & 0xff;
3162 lun = (rsel_tar_lun_id >> 8) & 7;
3163 dcb = find_dcb(acb, id, lun);
3164 if (!dcb) {
3165 dprintkl(KERN_ERR, "reselect: From non existent device "
3166 "<%02i-%i>\n", id, lun);
3167 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
3168 return;
3169 }
3170 acb->active_dcb = dcb;
3171
3172 if (!(dcb->dev_mode & NTC_DO_DISCONNECT))
3173 dprintkl(KERN_DEBUG, "reselect: in spite of forbidden "
3174 "disconnection? <%02i-%i>\n",
3175 dcb->target_id, dcb->target_lun);
3176
	if (dcb->sync_mode & EN_TAG_QUEUEING) {
3178 srb = acb->tmp_srb;
3179 dcb->active_srb = srb;
3180 } else {
3181
3182 srb = dcb->active_srb;
3183 if (!srb || !(srb->state & SRB_DISCONNECT)) {
3184
3185
3186
3187 dprintkl(KERN_DEBUG,
3188 "reselect: w/o disconnected cmds <%02i-%i>\n",
3189 dcb->target_id, dcb->target_lun);
3190 srb = acb->tmp_srb;
3191 srb->state = SRB_UNEXPECT_RESEL;
3192 dcb->active_srb = srb;
3193 enable_msgout_abort(acb, srb);
3194 } else {
3195 if (dcb->flag & ABORT_DEV_) {
3196
3197 enable_msgout_abort(acb, srb);
3198 } else
3199 srb->state = SRB_DATA_XFER;
3200
3201 }
3202 }
3203 srb->scsi_phase = PH_BUS_FREE;
3204
3205
3206 dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id);
3207 DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
3208 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
3209 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
3210 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
3211 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
3212
3213 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
3214}
3215
3216
3217static inline u8 tagq_blacklist(char *name)
3218{
3219#ifndef DC395x_NO_TAGQ
3220#if 0
3221 u8 i;
3222 for (i = 0; i < BADDEVCNT; i++)
3223 if (memcmp(name, DC395x_baddevname1[i], 28) == 0)
3224 return 1;
3225#endif
3226 return 0;
3227#else
3228 return 1;
3229#endif
3230}
3231
3232
3233static void disc_tagq_set(struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr)
3234{
3235
3236 if ((ptr->Vers & 0x07) >= 2 || (ptr->RDF & 0x0F) == 2) {
3237 if ((ptr->Flags & SCSI_INQ_CMDQUEUE)
3238 && (dcb->dev_mode & NTC_DO_TAG_QUEUEING) &&
3239
3240
3241
3242 !tagq_blacklist(((char *)ptr) + 8)) {
3243 if (dcb->max_command == 1)
3244 dcb->max_command =
3245 dcb->acb->tag_max_num;
3246 dcb->sync_mode |= EN_TAG_QUEUEING;
3247
3248 } else
3249 dcb->max_command = 1;
3250 }
3251}
3252
3253
3254static void add_dev(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3255 struct ScsiInqData *ptr)
3256{
3257 u8 bval1 = ptr->DevType & SCSI_DEVTYPE;
3258 dcb->dev_type = bval1;
3259
3260 disc_tagq_set(dcb, ptr);
3261}
3262
3263
3264
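/* Undo the DMA mappings of a command's data buffers and of its
 * scatter-gather list. */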
3265static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
3266{
3267 struct scsi_cmnd *cmd = srb->cmd;
3268 enum dma_data_direction dir = cmd->sc_data_direction;
3269
3270 if (scsi_sg_count(cmd) && dir != PCI_DMA_NONE) {
3271
3272 dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
3273 srb->sg_bus_addr, SEGMENTX_LEN);
3274 pci_unmap_single(acb->dev, srb->sg_bus_addr,
3275 SEGMENTX_LEN,
3276 PCI_DMA_TODEVICE);
3277 dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
3278 scsi_sg_count(cmd), scsi_bufflen(cmd));
3279
3280 scsi_dma_unmap(cmd);
3281 }
3282}
3283
3284
3285
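/* Undo the sense-buffer mapping set up by request_sense() and restore the
 * saved first segment entry. */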
3286static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
3287 struct ScsiReqBlk *srb)
3288{
3289 if (!(srb->flag & AUTO_REQSENSE))
3290 return;
3291
3292 dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
3293 srb->segment_x[0].address);
3294 pci_unmap_single(acb->dev, srb->segment_x[0].address,
3295 srb->segment_x[0].length, PCI_DMA_FROMDEVICE);
3296
3297 srb->total_xfer_length = srb->xferred;
3298 srb->segment_x[0].address =
3299 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address;
3300 srb->segment_x[0].length =
3301 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length;
3302}
3303
3304
3305
3306
3307
3308
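/*
 * Complete a request: evaluate target/adapter status (issuing an automatic
 * REQUEST SENSE on CHECK CONDITION and shrinking the queue depth on QUEUE
 * FULL), snoop INQUIRY data to enable tagged queueing, set the residual,
 * return the SRB to the free list and hand the command back to the
 * midlayer.
 */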
3309static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3310 struct ScsiReqBlk *srb)
3311{
3312 u8 tempcnt, status;
3313 struct scsi_cmnd *cmd = srb->cmd;
3314 enum dma_data_direction dir = cmd->sc_data_direction;
3315 int ckc_only = 1;
3316
3317 dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd,
3318 srb->cmd->device->id, (u8)srb->cmd->device->lun);
3319 dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
3320 srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
		scsi_sglist(cmd));
3322 status = srb->target_status;
3323 if (srb->flag & AUTO_REQSENSE) {
3324 dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
3325 pci_unmap_srb_sense(acb, srb);
3326
3327
3328
3329 srb->flag &= ~AUTO_REQSENSE;
3330 srb->adapter_status = 0;
3331 srb->target_status = CHECK_CONDITION << 1;
3332 if (debug_enabled(DBG_1)) {
3333 switch (cmd->sense_buffer[2] & 0x0f) {
3334 case NOT_READY:
3335 dprintkl(KERN_DEBUG,
3336 "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3337 cmd->cmnd[0], dcb->target_id,
3338 dcb->target_lun, status, acb->scan_devices);
3339 break;
3340 case UNIT_ATTENTION:
3341 dprintkl(KERN_DEBUG,
3342 "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3343 cmd->cmnd[0], dcb->target_id,
3344 dcb->target_lun, status, acb->scan_devices);
3345 break;
3346 case ILLEGAL_REQUEST:
3347 dprintkl(KERN_DEBUG,
3348 "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3349 cmd->cmnd[0], dcb->target_id,
3350 dcb->target_lun, status, acb->scan_devices);
3351 break;
3352 case MEDIUM_ERROR:
3353 dprintkl(KERN_DEBUG,
3354 "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3355 cmd->cmnd[0], dcb->target_id,
3356 dcb->target_lun, status, acb->scan_devices);
3357 break;
3358 case HARDWARE_ERROR:
3359 dprintkl(KERN_DEBUG,
3360 "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3361 cmd->cmnd[0], dcb->target_id,
3362 dcb->target_lun, status, acb->scan_devices);
3363 break;
3364 }
3365 if (cmd->sense_buffer[7] >= 6)
3366 printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x "
3367 "(0x%08x 0x%08x)\n",
3368 cmd->sense_buffer[2], cmd->sense_buffer[12],
3369 cmd->sense_buffer[13],
3370 *((unsigned int *)(cmd->sense_buffer + 3)),
3371 *((unsigned int *)(cmd->sense_buffer + 8)));
3372 else
3373 printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n",
3374 cmd->sense_buffer[2],
3375 *((unsigned int *)(cmd->sense_buffer + 3)));
3376 }
3377
3378 if (status == (CHECK_CONDITION << 1)) {
3379 cmd->result = DID_BAD_TARGET << 16;
3380 goto ckc_e;
3381 }
3382 dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");
3383
		/* Both branches of the original underflow test set the same
		 * result, so report CHECK CONDITION with sense data directly. */
		cmd->result = MK_RES_LNX(DRIVER_SENSE, DID_OK,
					 srb->end_message, CHECK_CONDITION);
3394
3395 goto ckc_e;
3396 }
3397
3398
3399 if (status) {
3400
3401
3402
3403 if (status_byte(status) == CHECK_CONDITION) {
3404 request_sense(acb, dcb, srb);
3405 return;
3406 } else if (status_byte(status) == QUEUE_FULL) {
3407 tempcnt = (u8)list_size(&dcb->srb_going_list);
3408 dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
3409 dcb->target_id, dcb->target_lun, tempcnt);
3410 if (tempcnt > 1)
3411 tempcnt--;
3412 dcb->max_command = tempcnt;
3413 free_tag(dcb, srb);
3414 srb_going_to_waiting_move(dcb, srb);
3415 waiting_set_timer(acb, HZ / 20);
3416 srb->adapter_status = 0;
3417 srb->target_status = 0;
3418 return;
3419 } else if (status == SCSI_STAT_SEL_TIMEOUT) {
3420 srb->adapter_status = H_SEL_TIMEOUT;
3421 srb->target_status = 0;
3422 cmd->result = DID_NO_CONNECT << 16;
3423 } else {
3424 srb->adapter_status = 0;
3425 SET_RES_DID(cmd->result, DID_ERROR);
3426 SET_RES_MSG(cmd->result, srb->end_message);
3427 SET_RES_TARGET(cmd->result, status);
3428
3429 }
3430 } else {
3431
3432
3433
3434 status = srb->adapter_status;
3435 if (status & H_OVER_UNDER_RUN) {
3436 srb->target_status = 0;
3437 SET_RES_DID(cmd->result, DID_OK);
3438 SET_RES_MSG(cmd->result, srb->end_message);
3439 } else if (srb->status & PARITY_ERROR) {
3440 SET_RES_DID(cmd->result, DID_PARITY);
3441 SET_RES_MSG(cmd->result, srb->end_message);
3442 } else {
3443
3444 srb->adapter_status = 0;
3445 srb->target_status = 0;
3446 SET_RES_DID(cmd->result, DID_OK);
3447 }
3448 }
3449
3450 if (dir != PCI_DMA_NONE && scsi_sg_count(cmd))
3451 pci_dma_sync_sg_for_cpu(acb->dev, scsi_sglist(cmd),
3452 scsi_sg_count(cmd), dir);
3453
3454 ckc_only = 0;
3455
3456 ckc_e:
3457
3458 if (cmd->cmnd[0] == INQUIRY) {
3459 unsigned char *base = NULL;
3460 struct ScsiInqData *ptr;
3461 unsigned long flags = 0;
3462 struct scatterlist* sg = scsi_sglist(cmd);
3463 size_t offset = 0, len = sizeof(struct ScsiInqData);
3464
3465 local_irq_save(flags);
3466 base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len);
3467 ptr = (struct ScsiInqData *)(base + offset);
3468
3469 if (!ckc_only && (cmd->result & RES_DID) == 0
3470 && cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
3471 && dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
3472 dcb->inquiry7 = ptr->Flags;
3473
3474
3475
3476 if ((cmd->result == (DID_OK << 16) ||
3477 status_byte(cmd->result) == CHECK_CONDITION)) {
3478 if (!dcb->init_tcq_flag) {
3479 add_dev(acb, dcb, ptr);
3480 dcb->init_tcq_flag = 1;
3481 }
3482 }
3483
3484 scsi_kunmap_atomic_sg(base);
3485 local_irq_restore(flags);
3486 }
3487
3488
3489 scsi_set_resid(cmd, srb->total_xfer_length);
3490
3491 cmd->SCp.this_residual = srb->total_xfer_length;
3492 cmd->SCp.buffers_residual = 0;
3493 if (debug_enabled(DBG_KG)) {
3494 if (srb->total_xfer_length)
3495 dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
3496 "cmnd=0x%02x Missed %i bytes\n",
3497 cmd, cmd->device->id, (u8)cmd->device->lun,
3498 cmd->cmnd[0], srb->total_xfer_length);
3499 }
3500
3501 srb_going_remove(dcb, srb);
3502
3503 if (srb == acb->tmp_srb)
3504 dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
3505 else {
3506 dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
3507 cmd, cmd->result);
3508 srb_free_insert(acb, srb);
3509 }
3510 pci_unmap_srb(acb, srb);
3511
3512 cmd->scsi_done(cmd);
3513 waiting_process_next(acb);
3514}
3515
3516
3517
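/* Fail every request on the going and waiting lists of all devices with
 * the given DID_* code; when 'force' is set the commands are completed
 * back to the midlayer immediately. */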
3518static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
3519 struct scsi_cmnd *cmd, u8 force)
3520{
3521 struct DeviceCtlBlk *dcb;
3522 dprintkl(KERN_INFO, "doing_srb_done: pids ");
3523
3524 list_for_each_entry(dcb, &acb->dcb_list, list) {
3525 struct ScsiReqBlk *srb;
3526 struct ScsiReqBlk *tmp;
3527 struct scsi_cmnd *p;
3528
3529 list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
3530 enum dma_data_direction dir;
3531 int result;
3532
3533 p = srb->cmd;
3534 dir = p->sc_data_direction;
3535 result = MK_RES(0, did_flag, 0, 0);
3536 printk("G:%p(%02i-%i) ", p,
3537 p->device->id, (u8)p->device->lun);
3538 srb_going_remove(dcb, srb);
3539 free_tag(dcb, srb);
3540 srb_free_insert(acb, srb);
3541 p->result = result;
3542 pci_unmap_srb_sense(acb, srb);
3543 pci_unmap_srb(acb, srb);
3544 if (force) {
3545
3546
3547 p->scsi_done(p);
3548 }
3549 }
3550 if (!list_empty(&dcb->srb_going_list))
3551 dprintkl(KERN_DEBUG,
3552 "How could the ML send cmnds to the Going queue? <%02i-%i>\n",
3553 dcb->target_id, dcb->target_lun);
3554 if (dcb->tag_mask)
3555 dprintkl(KERN_DEBUG,
3556 "tag_mask for <%02i-%i> should be empty, is %08x!\n",
3557 dcb->target_id, dcb->target_lun,
3558 dcb->tag_mask);
3559
3560
3561 list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
3562 int result;
3563 p = srb->cmd;
3564
3565 result = MK_RES(0, did_flag, 0, 0);
3566 printk("W:%p<%02i-%i>", p, p->device->id,
3567 (u8)p->device->lun);
3568 srb_waiting_remove(dcb, srb);
3569 srb_free_insert(acb, srb);
3570 p->result = result;
3571 pci_unmap_srb_sense(acb, srb);
3572 pci_unmap_srb(acb, srb);
3573 if (force) {
3574
3575
				p->scsi_done(p);
3577 }
3578 }
3579 if (!list_empty(&dcb->srb_waiting_list))
3580 dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n",
3581 list_size(&dcb->srb_waiting_list), dcb->target_id,
3582 dcb->target_lun);
3583 dcb->flag &= ~ABORT_DEV_;
3584 }
3585 printk("\n");
3586}
3587
3588
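/* Assert a SCSI bus reset and busy-wait until the chip latches the reset
 * interrupt. */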
3589static void reset_scsi_bus(struct AdapterCtlBlk *acb)
3590{
3591 dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb);
3592 acb->acb_flag |= RESET_DEV;
3593 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
3594
3595 while (!(DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET))
3596 ;
3597}
3598
3599
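/* Program the static chip configuration: selection timeout, parity and
 * phase-latch options, host ID, DMA FIFO behaviour and interrupt masks. */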
3600static void set_basic_config(struct AdapterCtlBlk *acb)
3601{
3602 u8 bval;
3603 u16 wval;
3604 DC395x_write8(acb, TRM_S1040_SCSI_TIMEOUT, acb->sel_timeout);
3605 if (acb->config & HCC_PARITY)
3606 bval = PHASELATCH | INITIATOR | BLOCKRST | PARITYCHECK;
3607 else
3608 bval = PHASELATCH | INITIATOR | BLOCKRST;
3609
3610 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG0, bval);
3611
3612
3613 DC395x_write8(acb, TRM_S1040_SCSI_CONFIG1, 0x03);
3614
3615 DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
3616
3617 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, 0x00);
3618
3619 wval = DC395x_read16(acb, TRM_S1040_GEN_CONTROL) & 0x7F;
3620 DC395x_write16(acb, TRM_S1040_GEN_CONTROL, wval);
3621
3622 wval = DC395x_read16(acb, TRM_S1040_DMA_CONFIG) & ~DMA_FIFO_CTRL;
3623 wval |=
3624 DMA_FIFO_HALF_HALF | DMA_ENHANCE ;
3625 DC395x_write16(acb, TRM_S1040_DMA_CONFIG, wval);
3626
3627 DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
3628
3629 DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x7F);
3630 DC395x_write8(acb, TRM_S1040_DMA_INTEN, EN_SCSIINTR | EN_DMAXFERERROR
3631
3632 );
3633}
3634
3635
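/* A SCSI bus reset was detected: re-initialise the chip and, unless we
 * caused the reset ourselves, fail all outstanding commands with DID_RESET
 * and restart command processing. */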
3636static void scsi_reset_detect(struct AdapterCtlBlk *acb)
3637{
3638 dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb);
3639
3640 if (timer_pending(&acb->waiting_timer))
3641 del_timer(&acb->waiting_timer);
3642
3643 DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
3644 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
3645
3646 udelay(500);
3647
3648 acb->last_reset =
3649 jiffies + 5 * HZ / 2 +
3650 HZ * acb->eeprom.delay_time;
3651
3652 clear_fifo(acb, "scsi_reset_detect");
3653 set_basic_config(acb);
3654
3655
3656
3657 if (acb->acb_flag & RESET_DEV) {
3658 acb->acb_flag |= RESET_DONE;
3659 } else {
3660 acb->acb_flag |= RESET_DETECT;
3661 reset_dev_param(acb);
3662 doing_srb_done(acb, DID_RESET, NULL, 1);
3663
3664 acb->active_dcb = NULL;
3665 acb->acb_flag = 0;
3666 waiting_process_next(acb);
3667 }
3668}
3669
3670
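/* Issue an automatic REQUEST SENSE for a command that returned CHECK
 * CONDITION, temporarily pointing segment_x[0] at the sense buffer. */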
3671static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3672 struct ScsiReqBlk *srb)
3673{
3674 struct scsi_cmnd *cmd = srb->cmd;
3675 dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n",
3676 cmd, cmd->device->id, (u8)cmd->device->lun);
3677
3678 srb->flag |= AUTO_REQSENSE;
3679 srb->adapter_status = 0;
3680 srb->target_status = 0;
3681
3682
3683 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
3684
3685
3686 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
3687 srb->segment_x[0].address;
3688 srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length =
3689 srb->segment_x[0].length;
3690 srb->xferred = srb->total_xfer_length;
3691
3692 srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
3693 srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
3694
3695 srb->segment_x[0].address =
3696 pci_map_single(acb->dev, cmd->sense_buffer,
3697 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
3698 dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
3699 cmd->sense_buffer, srb->segment_x[0].address,
3700 SCSI_SENSE_BUFFERSIZE);
3701 srb->sg_count = 1;
3702 srb->sg_index = 0;
3703
3704 if (start_scsi(acb, dcb, srb)) {
3705 dprintkl(KERN_DEBUG,
3706 "request_sense: (0x%p) failed <%02i-%i>\n",
3707 srb->cmd, dcb->target_id, dcb->target_lun);
3708 srb_going_to_waiting_move(dcb, srb);
3709 waiting_set_timer(acb, HZ / 100);
3710 }
3711}
3712
3713
3714
3715
3716
3717
3718
3719
3720
3721
3722
3723
3724
3725
3726
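/*
 * Allocate and initialise a device control block for one target/LUN,
 * seeding the sync/wide settings from the EEPROM.  For LUNs other than 0
 * the negotiated parameters are copied from an already known LUN of the
 * same target.  Returns NULL on allocation failure.
 */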
3727static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
3728 u8 target, u8 lun)
3729{
3730 struct NvRamType *eeprom = &acb->eeprom;
3731 u8 period_index = eeprom->target[target].period & 0x07;
3732 struct DeviceCtlBlk *dcb;
3733
3734 dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC);
3735 dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun);
3736 if (!dcb)
3737 return NULL;
3738 dcb->acb = NULL;
3739 INIT_LIST_HEAD(&dcb->srb_going_list);
3740 INIT_LIST_HEAD(&dcb->srb_waiting_list);
3741 dcb->active_srb = NULL;
3742 dcb->tag_mask = 0;
3743 dcb->max_command = 1;
3744 dcb->target_id = target;
3745 dcb->target_lun = lun;
3746 dcb->dev_mode = eeprom->target[target].cfg0;
3747#ifndef DC395x_NO_DISCONNECT
3748 dcb->identify_msg =
3749 IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
3750#else
3751 dcb->identify_msg = IDENTIFY(0, lun);
3752#endif
3753 dcb->inquiry7 = 0;
3754 dcb->sync_mode = 0;
3755 dcb->min_nego_period = clock_period[period_index];
3756 dcb->sync_period = 0;
3757 dcb->sync_offset = 0;
3758 dcb->flag = 0;
3759
3760#ifndef DC395x_NO_WIDE
3761 if ((dcb->dev_mode & NTC_DO_WIDE_NEGO)
3762 && (acb->config & HCC_WIDE_CARD))
3763 dcb->sync_mode |= WIDE_NEGO_ENABLE;
3764#endif
3765#ifndef DC395x_NO_SYNC
3766 if (dcb->dev_mode & NTC_DO_SYNC_NEGO)
3767 if (!(lun) || current_sync_offset)
3768 dcb->sync_mode |= SYNC_NEGO_ENABLE;
3769#endif
3770 if (dcb->target_lun != 0) {
3771
3772 struct DeviceCtlBlk *p;
3773 list_for_each_entry(p, &acb->dcb_list, list)
3774 if (p->target_id == dcb->target_id)
3775 break;
3776 dprintkdbg(DBG_1,
3777 "device_alloc: <%02i-%i> copy from <%02i-%i>\n",
3778 dcb->target_id, dcb->target_lun,
3779 p->target_id, p->target_lun);
3780 dcb->sync_mode = p->sync_mode;
3781 dcb->sync_period = p->sync_period;
3782 dcb->min_nego_period = p->min_nego_period;
3783 dcb->sync_offset = p->sync_offset;
3784 dcb->inquiry7 = p->inquiry7;
3785 }
3786 return dcb;
3787}
3788
3789
3790
3791
3792
3793
3794
3795
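/* Attach a DCB to the adapter: append it to dcb_list, make it the
 * round-robin start point if it is the first one, and record it in the
 * dcb_map/children lookup tables. */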
3796static void adapter_add_device(struct AdapterCtlBlk *acb,
3797 struct DeviceCtlBlk *dcb)
3798{
3799
3800 dcb->acb = acb;
3801
3802
3803 if (list_empty(&acb->dcb_list))
3804 acb->dcb_run_robin = dcb;
3805
3806
3807 list_add_tail(&dcb->list, &acb->dcb_list);
3808
3809
3810 acb->dcb_map[dcb->target_id] |= (1 << dcb->target_lun);
3811 acb->children[dcb->target_id][dcb->target_lun] = dcb;
3812}
3813
3814
3815
3816
3817
3818
3819
3820
3821
3822
3823
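/* Detach a DCB from the adapter lists and lookup tables, making sure
 * active_dcb and dcb_run_robin no longer point at it. */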
3824static void adapter_remove_device(struct AdapterCtlBlk *acb,
3825 struct DeviceCtlBlk *dcb)
3826{
3827 struct DeviceCtlBlk *i;
3828 struct DeviceCtlBlk *tmp;
3829 dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n",
3830 dcb->target_id, dcb->target_lun);
3831
3832
3833 if (acb->active_dcb == dcb)
3834 acb->active_dcb = NULL;
3835 if (acb->dcb_run_robin == dcb)
3836 acb->dcb_run_robin = dcb_get_next(&acb->dcb_list, dcb);
3837
3838
3839 list_for_each_entry_safe(i, tmp, &acb->dcb_list, list)
3840 if (dcb == i) {
3841 list_del(&i->list);
3842 break;
3843 }
3844
3845
3846 acb->dcb_map[dcb->target_id] &= ~(1 << dcb->target_lun);
3847 acb->children[dcb->target_id][dcb->target_lun] = NULL;
3848 dcb->acb = NULL;
3849}
3850
3851
3852
3853
3854
3855
3856
3857
3858
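/* Remove and free a DCB unless it still has more than one request on its
 * going list. */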
3859static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb,
3860 struct DeviceCtlBlk *dcb)
3861{
3862 if (list_size(&dcb->srb_going_list) > 1) {
3863 dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> "
3864 "Won't remove because of %i active requests.\n",
3865 dcb->target_id, dcb->target_lun,
3866 list_size(&dcb->srb_going_list));
3867 return;
3868 }
3869 adapter_remove_device(acb, dcb);
3870 kfree(dcb);
3871}
3872
3873
3874
3875
3876
3877
3878
3879
3880static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
3881{
3882 struct DeviceCtlBlk *dcb;
3883 struct DeviceCtlBlk *tmp;
3884 dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n",
3885 list_size(&acb->dcb_list));
3886
3887 list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list)
3888 adapter_remove_and_free_device(acb, dcb);
3889}
3890
3891
3892
3893
3894
3895
3896
3897
3898
3899static int dc395x_slave_alloc(struct scsi_device *scsi_device)
3900{
3901 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
3902 struct DeviceCtlBlk *dcb;
3903
3904 dcb = device_alloc(acb, scsi_device->id, scsi_device->lun);
3905 if (!dcb)
3906 return -ENOMEM;
3907 adapter_add_device(acb, dcb);
3908
3909 return 0;
3910}
3911
3912
3913
3914
3915
3916
3917
3918
3919static void dc395x_slave_destroy(struct scsi_device *scsi_device)
3920{
3921 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
3922 struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
3923 if (dcb)
3924 adapter_remove_and_free_device(acb, dcb);
3925}
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
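/* Short EEPROM delay: program the general purpose timer and busy-wait for
 * its timeout bit. */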
3937static void trms1040_wait_30us(unsigned long io_port)
3938{
3939
3940 outb(5, io_port + TRM_S1040_GEN_TIMER);
3941 while (!(inb(io_port + TRM_S1040_GEN_STATUS) & GTIMEOUT))
3942 ;
3943}
3944
3945
3946
3947
3948
3949
3950
3951
3952
3953
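/* Clock a 3-bit command and a 7-bit address out to the TRM-S1040 serial
 * EEPROM, most significant bit first. */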
3954static void trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
3955{
3956 int i;
3957 u8 send_data;
3958
3959
3960 for (i = 0; i < 3; i++, cmd <<= 1) {
3961 send_data = NVR_SELECT;
3962 if (cmd & 0x04)
3963 send_data |= NVR_BITOUT;
3964
3965 outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
3966 trms1040_wait_30us(io_port);
3967 outb((send_data | NVR_CLOCK),
3968 io_port + TRM_S1040_GEN_NVRAM);
3969 trms1040_wait_30us(io_port);
3970 }
3971
3972
3973 for (i = 0; i < 7; i++, addr <<= 1) {
3974 send_data = NVR_SELECT;
3975 if (addr & 0x40)
3976 send_data |= NVR_BITOUT;
3977
3978 outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
3979 trms1040_wait_30us(io_port);
3980 outb((send_data | NVR_CLOCK),
3981 io_port + TRM_S1040_GEN_NVRAM);
3982 trms1040_wait_30us(io_port);
3983 }
3984 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
3985 trms1040_wait_30us(io_port);
3986}
3987
3988
3989
3990
3991
3992
3993
3994
3995
3996
3997
3998
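/* Write one byte to the serial EEPROM: issue the write command for the
 * given address, shift out the data bits and poll until the device signals
 * completion. */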
3999static void trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
4000{
4001 int i;
4002 u8 send_data;
4003
4004
4005 trms1040_write_cmd(io_port, 0x05, addr);
4006
4007
4008 for (i = 0; i < 8; i++, byte <<= 1) {
4009 send_data = NVR_SELECT;
4010 if (byte & 0x80)
4011 send_data |= NVR_BITOUT;
4012
4013 outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
4014 trms1040_wait_30us(io_port);
4015 outb((send_data | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
4016 trms1040_wait_30us(io_port);
4017 }
4018 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
4019 trms1040_wait_30us(io_port);
4020
4021
4022 outb(0, io_port + TRM_S1040_GEN_NVRAM);
4023 trms1040_wait_30us(io_port);
4024
4025 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
4026 trms1040_wait_30us(io_port);
4027
4028
4029 while (1) {
4030 outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
4031 trms1040_wait_30us(io_port);
4032
4033 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
4034 trms1040_wait_30us(io_port);
4035
4036 if (inb(io_port + TRM_S1040_GEN_NVRAM) & NVR_BITIN)
4037 break;
4038 }
4039
4040
4041 outb(0, io_port + TRM_S1040_GEN_NVRAM);
4042}
4043
4044
4045
4046
4047
4048
4049
4050
4051
4052
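/* Write the complete 128-byte NvRamType image to the EEPROM, bracketed by
 * write-enable and write-disable commands. */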
4053static void trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port)
4054{
4055 u8 *b_eeprom = (u8 *)eeprom;
4056 u8 addr;
4057
4058
4059 outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
4060 io_port + TRM_S1040_GEN_CONTROL);
4061
4062
4063 trms1040_write_cmd(io_port, 0x04, 0xFF);
4064 outb(0, io_port + TRM_S1040_GEN_NVRAM);
4065 trms1040_wait_30us(io_port);
4066
4067
4068 for (addr = 0; addr < 128; addr++, b_eeprom++)
4069 trms1040_set_data(io_port, addr, *b_eeprom);
4070
4071
4072 trms1040_write_cmd(io_port, 0x04, 0x00);
4073 outb(0, io_port + TRM_S1040_GEN_NVRAM);
4074 trms1040_wait_30us(io_port);
4075
4076
4077 outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
4078 io_port + TRM_S1040_GEN_CONTROL);
4079}
4080
4081
4082
4083
4084
4085
4086
4087
4088
4089
4090
4091
4092
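/* Read one byte from the serial EEPROM at the given address, most
 * significant bit first. */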
4093static u8 trms1040_get_data(unsigned long io_port, u8 addr)
4094{
4095 int i;
4096 u8 read_byte;
4097 u8 result = 0;
4098
4099
4100 trms1040_write_cmd(io_port, 0x06, addr);
4101
4102
4103 for (i = 0; i < 8; i++) {
4104 outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
4105 trms1040_wait_30us(io_port);
4106 outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
4107
4108
4109 read_byte = inb(io_port + TRM_S1040_GEN_NVRAM);
4110 result <<= 1;
4111 if (read_byte & NVR_BITIN)
4112 result |= 1;
4113
4114 trms1040_wait_30us(io_port);
4115 }
4116
4117
4118 outb(0, io_port + TRM_S1040_GEN_NVRAM);
4119 return result;
4120}
4121
4122
4123
4124
4125
4126
4127
4128
4129
4130
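/* Read the complete 128-byte EEPROM image into the NvRamType structure. */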
4131static void trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
4132{
4133 u8 *b_eeprom = (u8 *)eeprom;
4134 u8 addr;
4135
4136
4137 outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
4138 io_port + TRM_S1040_GEN_CONTROL);
4139
4140
4141 for (addr = 0; addr < 128; addr++, b_eeprom++)
4142 *b_eeprom = trms1040_get_data(io_port, addr);
4143
4144
4145 outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
4146 io_port + TRM_S1040_GEN_CONTROL);
4147}
4148
4149
4150
4151
4152
4153
4154
4155
4156
4157
4158
4159
4160
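/*
 * Read the EEPROM and verify its checksum (the 64 words must sum to
 * 0x1234).  On failure a default configuration is generated, adjusted by
 * the module parameters and written back; otherwise the stored settings
 * are used, again subject to the configured overrides.
 */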
4161static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
4162{
4163 u16 *w_eeprom = (u16 *)eeprom;
4164 u16 w_addr;
4165 u16 cksum;
4166 u32 d_addr;
4167 u32 *d_eeprom;
4168
4169 trms1040_read_all(eeprom, io_port);
4170
4171 cksum = 0;
4172 for (w_addr = 0, w_eeprom = (u16 *)eeprom; w_addr < 64;
4173 w_addr++, w_eeprom++)
4174 cksum += *w_eeprom;
4175 if (cksum != 0x1234) {
4176
4177
4178
4179
4180 dprintkl(KERN_WARNING,
4181 "EEProm checksum error: using default values and options.\n");
4182 eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
4183 eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
4184 eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
4185 eeprom->sub_sys_id[1] =
4186 (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
4187 eeprom->sub_class = 0x00;
4188 eeprom->vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
4189 eeprom->vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
4190 eeprom->device_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
4191 eeprom->device_id[1] =
4192 (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
4193 eeprom->reserved = 0x00;
4194
4195 for (d_addr = 0, d_eeprom = (u32 *)eeprom->target;
4196 d_addr < 16; d_addr++, d_eeprom++)
4197 *d_eeprom = 0x00000077;
4198
4199 *d_eeprom++ = 0x04000F07;
4200 *d_eeprom++ = 0x00000015;
4201 for (d_addr = 0; d_addr < 12; d_addr++, d_eeprom++)
4202 *d_eeprom = 0x00;
4203
4204
4205 set_safe_settings();
4206 fix_settings();
4207 eeprom_override(eeprom);
4208
4209 eeprom->cksum = 0x00;
4210 for (w_addr = 0, cksum = 0, w_eeprom = (u16 *)eeprom;
4211 w_addr < 63; w_addr++, w_eeprom++)
4212 cksum += *w_eeprom;
4213
4214 *w_eeprom = 0x1234 - cksum;
4215 trms1040_write_all(eeprom, io_port);
4216 eeprom->delay_time = cfg_data[CFG_RESET_DELAY].value;
4217 } else {
4218 set_safe_settings();
4219 eeprom_index_to_delay(eeprom);
4220 eeprom_override(eeprom);
4221 }
4222}
4223
4224
4225
4226
4227
4228
4229
4230
4231static void print_eeprom_settings(struct NvRamType *eeprom)
4232{
4233 dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n",
4234 eeprom->scsi_id,
4235 eeprom->target[0].period,
4236 clock_speed[eeprom->target[0].period] / 10,
4237 clock_speed[eeprom->target[0].period] % 10,
4238 eeprom->target[0].cfg0);
4239 dprintkl(KERN_INFO, " AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n",
4240 eeprom->channel_cfg, eeprom->max_tag,
4241 1 << eeprom->max_tag, eeprom->delay_time);
4242}
4243
4244
4245
4246static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
4247{
4248 int i;
4249 const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
4250
4251 for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
4252 kfree(acb->srb_array[i].segment_x);
4253}
4254
4255
4256
4257
4258
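/* Allocate page-sized blocks of SG entries and distribute them to the SRBs
 * (plus the temporary SRB).  Returns non-zero on allocation failure. */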
4259static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
4260{
4261 const unsigned mem_needed = (DC395x_MAX_SRB_CNT+1)
4262 *SEGMENTX_LEN;
4263 int pages = (mem_needed+(PAGE_SIZE-1))/PAGE_SIZE;
4264 const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
4265 int srb_idx = 0;
4266 unsigned i = 0;
4267 struct SGentry *uninitialized_var(ptr);
4268
4269 for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
4270 acb->srb_array[i].segment_x = NULL;
4271
4272 dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
4273 while (pages--) {
4274 ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
4275 if (!ptr) {
4276 adapter_sg_tables_free(acb);
4277 return 1;
4278 }
4279 dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n",
4280 PAGE_SIZE, ptr, srb_idx);
4281 i = 0;
4282 while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT)
4283 acb->srb_array[srb_idx++].segment_x =
4284 ptr + (i++ * DC395x_MAX_SG_LISTENTRY);
4285 }
4286 if (i < srbs_per_page)
4287 acb->srb.segment_x =
4288 ptr + (i * DC395x_MAX_SG_LISTENTRY);
4289 else
4290 dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
4291 return 0;
4292}
4293
4294
4295
4296
4297
4298
4299
4300
4301
4302
4303
4304
4305static void adapter_print_config(struct AdapterCtlBlk *acb)
4306{
4307 u8 bval;
4308
4309 bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS);
4310 dprintkl(KERN_INFO, "%sConnectors: ",
4311 ((bval & WIDESCSI) ? "(Wide) " : ""));
4312 if (!(bval & CON5068))
4313 printk("ext%s ", !(bval & EXT68HIGH) ? "68" : "50");
4314 if (!(bval & CON68))
4315 printk("int68%s ", !(bval & INT68HIGH) ? "" : "(50)");
4316 if (!(bval & CON50))
4317 printk("int50 ");
	if ((bval & (CON5068 | CON50 | CON68)) == 0)
		printk(" Oops! (All 3?) ");
4321 bval = DC395x_read8(acb, TRM_S1040_GEN_CONTROL);
4322 printk(" Termination: ");
4323 if (bval & DIS_TERM)
4324 printk("Disabled\n");
4325 else {
4326 if (bval & AUTOTERM)
4327 printk("Auto ");
4328 if (bval & LOW8TERM)
4329 printk("Low ");
4330 if (bval & UP8TERM)
4331 printk("High ");
4332 printk("\n");
4333 }
4334}
4335
4336
4337
4338
4339
4340
4341
4342
4343
4344
4345
4346
4347
4348
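/* Initialise the software state of the adapter control block (lists,
 * timers, SRB pool, tag limit, EEPROM-derived settings).  No hardware
 * access happens here. */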
4349static void adapter_init_params(struct AdapterCtlBlk *acb)
4350{
4351 struct NvRamType *eeprom = &acb->eeprom;
4352 int i;
4353
4354
4355
4356
4357
4358 INIT_LIST_HEAD(&acb->dcb_list);
4359 acb->dcb_run_robin = NULL;
4360 acb->active_dcb = NULL;
4361
4362 INIT_LIST_HEAD(&acb->srb_free_list);
4363
4364 acb->tmp_srb = &acb->srb;
4365 timer_setup(&acb->waiting_timer, waiting_timeout, 0);
4366 timer_setup(&acb->selto_timer, NULL, 0);
4367
4368 acb->srb_count = DC395x_MAX_SRB_CNT;
4369
4370 acb->sel_timeout = DC395x_SEL_TIMEOUT;
4371
4372
4373 acb->tag_max_num = 1 << eeprom->max_tag;
4374 if (acb->tag_max_num > 30)
4375 acb->tag_max_num = 30;
4376
4377 acb->acb_flag = 0;
4378 acb->gmode2 = eeprom->channel_cfg;
4379 acb->config = 0;
4380
4381 if (eeprom->channel_cfg & NAC_SCANLUN)
4382 acb->lun_chk = 1;
4383 acb->scan_devices = 1;
4384
4385 acb->scsi_host->this_id = eeprom->scsi_id;
4386 acb->hostid_bit = (1 << acb->scsi_host->this_id);
4387
4388 for (i = 0; i < DC395x_MAX_SCSI_ID; i++)
4389 acb->dcb_map[i] = 0;
4390
4391 acb->msg_len = 0;
4392
4393
4394 for (i = 0; i < acb->srb_count - 1; i++)
4395 srb_free_insert(acb, &acb->srb_array[i]);
4396}
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407
4408
4409
4410
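/* Fill in the Scsi_Host fields (queue depths, IDs, I/O window, IRQ and
 * LUN limits) from the adapter state and the EEPROM. */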
4411static void adapter_init_scsi_host(struct Scsi_Host *host)
4412{
4413 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
4414 struct NvRamType *eeprom = &acb->eeprom;
4415
4416 host->max_cmd_len = 24;
4417 host->can_queue = DC395x_MAX_CMD_QUEUE;
4418 host->cmd_per_lun = DC395x_MAX_CMD_PER_LUN;
4419 host->this_id = (int)eeprom->scsi_id;
4420 host->io_port = acb->io_port_base;
4421 host->n_io_port = acb->io_port_len;
4422 host->dma_channel = -1;
4423 host->unique_id = acb->io_port_base;
4424 host->irq = acb->irq_level;
4425 acb->last_reset = jiffies;
4426
4427 host->max_id = 16;
4428 if (host->max_id - 1 == eeprom->scsi_id)
4429 host->max_id--;
4430
4431 if (eeprom->channel_cfg & NAC_SCANLUN)
4432 host->max_lun = 8;
4433 else
4434 host->max_lun = 1;
4435}
4436
4437
4438
4439
4440
4441
4442
4443
4444
4445
4446
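/* First hardware setup: mask interrupts, reset the SCSI and DMA modules,
 * record whether the card is wide and, if the EEPROM asks for it, perform
 * the power-on SCSI bus reset. */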
4447static void adapter_init_chip(struct AdapterCtlBlk *acb)
4448{
4449 struct NvRamType *eeprom = &acb->eeprom;
4450
4451
4452 DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
4453 DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
4454
4455
4456 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
4457
4458
4459 DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
4460 udelay(20);
4461
4462
4463 acb->config = HCC_AUTOTERM | HCC_PARITY;
4464 if (DC395x_read8(acb, TRM_S1040_GEN_STATUS) & WIDESCSI)
4465 acb->config |= HCC_WIDE_CARD;
4466
4467 if (eeprom->channel_cfg & NAC_POWERON_SCSI_RESET)
4468 acb->config |= HCC_SCSI_RESET;
4469
4470 if (acb->config & HCC_SCSI_RESET) {
4471 dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n");
4472 DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);
4473
4474
4475
4476 udelay(500);
4477
4478 acb->last_reset =
4479 jiffies + HZ / 2 +
4480 HZ * acb->eeprom.delay_time;
4481
4482
4483 }
4484}
4485
4486
4487
4488
4489
4490
4491
4492
4493
4494
4495
4496
4497
4498
4499
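/*
 * Initialise a newly probed adapter: reserve the I/O region and IRQ, read
 * and sanity-check the EEPROM, set up the software state, allocate the SG
 * tables and program the chip.  Returns 0 on success and 1 on failure, in
 * which case everything acquired so far is released again.
 */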
4500static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port,
4501 u32 io_port_len, unsigned int irq)
4502{
4503 if (!request_region(io_port, io_port_len, DC395X_NAME)) {
4504 dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port);
4505 goto failed;
4506 }
4507
4508 acb->io_port_base = io_port;
4509 acb->io_port_len = io_port_len;
4510
4511 if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) {
4512
4513 dprintkl(KERN_INFO, "Failed to register IRQ\n");
4514 goto failed;
4515 }
4516
4517 acb->irq_level = irq;
4518
4519
4520 check_eeprom(&acb->eeprom, io_port);
4521 print_eeprom_settings(&acb->eeprom);
4522
4523
4524 adapter_init_params(acb);
4525
4526
4527 adapter_print_config(acb);
4528
4529 if (adapter_sg_tables_alloc(acb)) {
4530 dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n");
4531 goto failed;
4532 }
4533 adapter_init_scsi_host(acb->scsi_host);
4534 adapter_init_chip(acb);
4535 set_basic_config(acb);
4536
4537 dprintkdbg(DBG_0,
4538 "adapter_init: acb=%p, pdcb_map=%p psrb_array=%p "
4539 "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
4540 acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk),
4541 sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk));
4542 return 0;
4543
4544failed:
4545 if (acb->irq_level)
4546 free_irq(acb->irq_level, acb);
4547 if (acb->io_port_base)
4548 release_region(acb->io_port_base, acb->io_port_len);
4549 adapter_sg_tables_free(acb);
4550
4551 return 1;
4552}
4553
4554
4555
4556
4557
4558
4559
4560
4561
4562static void adapter_uninit_chip(struct AdapterCtlBlk *acb)
4563{
4564
4565 DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0);
4566 DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0);
4567
4568
4569 if (acb->config & HCC_SCSI_RESET)
4570 reset_scsi_bus(acb);
4571
4572
4573 DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
4574}
4575
4576
4577
4578
4579
4580
4581
4582
4583
4584
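/* Tear down an adapter: stop the timers, quiesce the chip, free all DCBs,
 * then release the IRQ, the I/O region and the SG tables. */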
static void adapter_uninit(struct AdapterCtlBlk *acb)
{
	unsigned long flags;
	DC395x_LOCK_IO(acb->scsi_host, flags);

	/* remove any pending timers */
	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);
	if (timer_pending(&acb->selto_timer))
		del_timer(&acb->selto_timer);

	adapter_uninit_chip(acb);
	adapter_remove_and_free_all_devices(acb);
	DC395x_UNLOCK_IO(acb->scsi_host, flags);

	if (acb->irq_level)
		free_irq(acb->irq_level, acb);
	if (acb->io_port_base)
		release_region(acb->io_port_base, acb->io_port_len);

	adapter_sg_tables_free(acb);
}


#undef YESNO
#define YESNO(YN) \
	if (YN) seq_puts(m, " Yes ");\
	else seq_puts(m, " No ")

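/**
 * dc395x_show_info - Report driver and adapter state via /proc.
 *
 * Prints the adapter configuration, the per-device negotiation
 * settings and the waiting/going request queues for each device
 * control block.
 *
 * @m:    The seq_file to print into.
 * @host: The SCSI host being reported on.
 */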
static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
	int spd, spd1;
	struct DeviceCtlBlk *dcb;
	unsigned long flags;
	int dev;

	seq_puts(m, DC395X_BANNER " PCI SCSI Host Adapter\n"
		" Driver Version " DC395X_VERSION "\n");

	DC395x_LOCK_IO(acb->scsi_host, flags);

	seq_printf(m, "SCSI Host Nr %i, ", host->host_no);
	seq_printf(m, "DC395U/UW/F DC315/U %s\n",
		(acb->config & HCC_WIDE_CARD) ? "Wide" : "");
	seq_printf(m, "io_port_base 0x%04lx, ", acb->io_port_base);
	seq_printf(m, "irq_level 0x%04x, ", acb->irq_level);
	seq_printf(m, " SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);

	seq_printf(m, "MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun);
	seq_printf(m, "AdapterID %i\n", host->this_id);

	seq_printf(m, "tag_max_num %i", acb->tag_max_num);

	seq_printf(m, ", FilterCfg 0x%02x",
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1));
	seq_printf(m, ", DelayReset %is\n", acb->eeprom.delay_time);

	seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list));
	seq_printf(m, "Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
		acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2],
		acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5],
		acb->dcb_map[6], acb->dcb_map[7]);
	seq_printf(m, " %02x %02x %02x %02x %02x %02x %02x %02x\n",
		acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10],
		acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13],
		acb->dcb_map[14], acb->dcb_map[15]);

	seq_puts(m,
		"Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");

	dev = 0;
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		int nego_period;
		seq_printf(m, "%02i %02i %02i ", dev, dcb->target_id,
			dcb->target_lun);
		YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK);
		YESNO(dcb->sync_offset);
		YESNO(dcb->sync_period & WIDE_SYNC);
		YESNO(dcb->dev_mode & NTC_DO_DISCONNECT);
		YESNO(dcb->dev_mode & NTC_DO_SEND_START);
		YESNO(dcb->sync_mode & EN_TAG_QUEUEING);
		nego_period = clock_period[dcb->sync_period & 0x07] << 2;
		if (dcb->sync_offset)
			seq_printf(m, " %03i ns ", nego_period);
		else
			seq_printf(m, " (%03i ns)", (dcb->min_nego_period << 2));

		if (dcb->sync_offset & 0x0f) {
			spd = 1000 / (nego_period);
			spd1 = 1000 % (nego_period);
			spd1 = (spd1 * 10 + nego_period / 2) / (nego_period);
			seq_printf(m, " %2i.%1i M %02i ", spd, spd1,
				(dcb->sync_offset & 0x0f));
		} else
			seq_puts(m, " ");

		seq_printf(m, " %02i\n", dcb->max_command);
		dev++;
	}

	if (timer_pending(&acb->waiting_timer))
		seq_puts(m, "Waiting queue timer running\n");
	else
		seq_putc(m, '\n');

	list_for_each_entry(dcb, &acb->dcb_list, list) {
		struct ScsiReqBlk *srb;
		if (!list_empty(&dcb->srb_waiting_list))
			seq_printf(m, "DCB (%02i-%i): Waiting: %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_waiting_list));
		list_for_each_entry(srb, &dcb->srb_waiting_list, list)
			seq_printf(m, " %p", srb->cmd);
		if (!list_empty(&dcb->srb_going_list))
			seq_printf(m, "\nDCB (%02i-%i): Going : %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_going_list));
		list_for_each_entry(srb, &dcb->srb_going_list, list)
			seq_printf(m, " %p", srb->cmd);
		if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
			seq_putc(m, '\n');
	}

	if (debug_enabled(DBG_1)) {
		seq_printf(m, "DCB list for ACB %p:\n", acb);
		list_for_each_entry(dcb, &acb->dcb_list, list) {
			seq_printf(m, "%p -> ", dcb);
		}
		seq_puts(m, "END\n");
	}

	DC395x_UNLOCK_IO(acb->scsi_host, flags);
	return 0;
}

static struct scsi_host_template dc395x_driver_template = {
	.module = THIS_MODULE,
	.proc_name = DC395X_NAME,
	.show_info = dc395x_show_info,
	.name = DC395X_BANNER " " DC395X_VERSION,
	.queuecommand = dc395x_queue_command,
	.bios_param = dc395x_bios_param,
	.slave_alloc = dc395x_slave_alloc,
	.slave_destroy = dc395x_slave_destroy,
	.can_queue = DC395x_MAX_CAN_QUEUE,
	.this_id = 7,
	.sg_tablesize = DC395x_MAX_SG_TABLESIZE,
	.cmd_per_lun = DC395x_MAX_CMD_PER_LUN,
	.eh_abort_handler = dc395x_eh_abort,
	.eh_bus_reset_handler = dc395x_eh_bus_reset,
	.use_clustering = DISABLE_CLUSTERING,
};

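/**
 * banner_display - Print the driver banner the first time it is called.
 */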
static void banner_display(void)
{
	static int banner_done = 0;

	if (!banner_done) {
		dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION);
		banner_done = 1;
	}
}

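/**
 * dc395x_init_one - PCI probe callback for a single card.
 *
 * Enables the PCI device, allocates the SCSI host and adapter control
 * block, initialises the adapter and registers the host with the SCSI
 * mid layer. Any failure unwinds the work done so far.
 *
 * @dev: The PCI device that was matched.
 * @id:  The matching entry from dc395x_pci_table.
 *
 * Returns 0 on success, -ENODEV on failure.
 */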
static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct Scsi_Host *scsi_host = NULL;
	struct AdapterCtlBlk *acb = NULL;
	unsigned long io_port_base;
	unsigned int io_port_len;
	unsigned int irq;

	dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev));
	banner_display();

	if (pci_enable_device(dev)) {
		dprintkl(KERN_INFO, "PCI Enable device failed.\n");
		return -ENODEV;
	}
	io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK;
	io_port_len = pci_resource_len(dev, 0);
	irq = dev->irq;
	dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq);

	/* allocate the SCSI host; our adapter control block lives in hostdata */
	scsi_host = scsi_host_alloc(&dc395x_driver_template,
				    sizeof(struct AdapterCtlBlk));
	if (!scsi_host) {
		dprintkl(KERN_INFO, "scsi_host_alloc failed\n");
		goto fail;
	}
	acb = (struct AdapterCtlBlk *)scsi_host->hostdata;
	acb->scsi_host = scsi_host;
	acb->dev = dev;

	/* set up the adapter: I/O region, IRQ, EEPROM, SG tables and chip */
	if (adapter_init(acb, io_port_base, io_port_len, irq)) {
		dprintkl(KERN_INFO, "adapter init failed\n");
		goto fail;
	}

	pci_set_master(dev);

	/* register the host with the SCSI mid layer and scan for devices */
	if (scsi_add_host(scsi_host, &dev->dev)) {
		dprintkl(KERN_ERR, "scsi_add_host failed\n");
		goto fail;
	}
	pci_set_drvdata(dev, scsi_host);
	scsi_scan_host(scsi_host);

	return 0;

fail:
	if (acb != NULL)
		adapter_uninit(acb);
	if (scsi_host != NULL)
		scsi_host_put(scsi_host);
	pci_disable_device(dev);
	return -ENODEV;
}

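/**
 * dc395x_remove_one - PCI remove callback for a single card.
 *
 * Unregisters the SCSI host, releases the adapter resources and
 * disables the PCI device.
 *
 * @dev: The PCI device being removed.
 */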
static void dc395x_remove_one(struct pci_dev *dev)
{
	struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata);

	dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb);

	scsi_remove_host(scsi_host);
	adapter_uninit(acb);
	pci_disable_device(dev);
	scsi_host_put(scsi_host);
}

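/* PCI IDs this driver binds to: all Tekram TRM-S1040 based boards */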
static const struct pci_device_id dc395x_pci_table[] = {
	{
		.vendor = PCI_VENDOR_ID_TEKRAM,
		.device = PCI_DEVICE_ID_TEKRAM_TRMS1040,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{}
};
MODULE_DEVICE_TABLE(pci, dc395x_pci_table);


static struct pci_driver dc395x_driver = {
	.name = DC395X_NAME,
	.id_table = dc395x_pci_table,
	.probe = dc395x_init_one,
	.remove = dc395x_remove_one,
};

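/**
 * dc395x_module_init - Register the PCI driver on module load.
 */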
static int __init dc395x_module_init(void)
{
	return pci_register_driver(&dc395x_driver);
}

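/**
 * dc395x_module_exit - Unregister the PCI driver on module unload.
 */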
static void __exit dc395x_module_exit(void)
{
	pci_unregister_driver(&dc395x_driver);
}


module_init(dc395x_module_init);
module_exit(dc395x_module_exit);

MODULE_AUTHOR("C.L. Huang / Erich Chen / Kurt Garloff");
MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series");
MODULE_LICENSE("GPL");