1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49#include <linux/module.h>
50#include <linux/moduleparam.h>
51#include <linux/delay.h>
52#include <linux/ctype.h>
53#include <linux/blkdev.h>
54#include <linux/interrupt.h>
55#include <linux/init.h>
56#include <linux/spinlock.h>
57#include <linux/pci.h>
58#include <linux/list.h>
59#include <linux/vmalloc.h>
60#include <linux/slab.h>
61#include <asm/io.h>
62
63#include <scsi/scsi.h>
64#include <scsi/scsi_cmnd.h>
65#include <scsi/scsi_device.h>
66#include <scsi/scsi_host.h>
67#include <scsi/scsi_transport_spi.h>
68
69#include "dc395x.h"
70
71#define DC395X_NAME "dc395x"
72#define DC395X_BANNER "Tekram DC395(U/UW/F), DC315(U) - ASIC TRM-S1040"
73#define DC395X_VERSION "v2.05, 2004/03/08"
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92#define DBG_KG 0x0001
93#define DBG_0 0x0002
94#define DBG_1 0x0004
95#define DBG_SG 0x0020
96#define DBG_FIFO 0x0040
97#define DBG_PIO 0x0080
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112#define dprintkl(level, format, arg...) \
113 printk(level DC395X_NAME ": " format , ## arg)
114
115
116#ifdef DEBUG_MASK
117
118
119
120
121
122
123#define dprintkdbg(type, format, arg...) \
124 do { \
125 if ((type) & (DEBUG_MASK)) \
126 dprintkl(KERN_DEBUG , format , ## arg); \
127 } while (0)
128
129
130
131
132#define debug_enabled(type) ((DEBUG_MASK) & (type))
133
134#else
135
136
137
138#define dprintkdbg(type, format, arg...) \
139 do {} while (0)
140#define debug_enabled(type) (0)
141
142#endif
143
144
145#ifndef PCI_VENDOR_ID_TEKRAM
146#define PCI_VENDOR_ID_TEKRAM 0x1DE1
147#endif
148#ifndef PCI_DEVICE_ID_TEKRAM_TRMS1040
149#define PCI_DEVICE_ID_TEKRAM_TRMS1040 0x0391
150#endif
151
152
153#define DC395x_LOCK_IO(dev,flags) spin_lock_irqsave(((struct Scsi_Host *)dev)->host_lock, flags)
154#define DC395x_UNLOCK_IO(dev,flags) spin_unlock_irqrestore(((struct Scsi_Host *)dev)->host_lock, flags)
155
156#define DC395x_read8(acb,address) (u8)(inb(acb->io_port_base + (address)))
157#define DC395x_read16(acb,address) (u16)(inw(acb->io_port_base + (address)))
158#define DC395x_read32(acb,address) (u32)(inl(acb->io_port_base + (address)))
159#define DC395x_write8(acb,address,value) outb((value), acb->io_port_base + (address))
160#define DC395x_write16(acb,address,value) outw((value), acb->io_port_base + (address))
161#define DC395x_write32(acb,address,value) outl((value), acb->io_port_base + (address))
162
163#define TAG_NONE 255
164
165
166
167
168
169
170#define SEGMENTX_LEN (sizeof(struct SGentry)*DC395x_MAX_SG_LISTENTRY)
171
172
/* One scatter/gather segment as consumed by the TRM-S1040 DMA engine. */
struct SGentry {
	u32 address;		/* 32-bit bus/DMA address of the segment */
	u32 length;		/* segment length in bytes */
};
177
178
/* Per-target settings stored in the serial EEPROM (one per SCSI id). */
struct NVRamTarget {
	u8 cfg0;		/* NTC_* device-mode flags (becomes dcb->dev_mode) */
	u8 period;		/* low 3 bits index clock_period[]/clock_speed[] */
	u8 cfg2;		/* reserved config byte — meaning not visible here */
	u8 cfg3;		/* reserved config byte — meaning not visible here */
};
185
/*
 * In-memory image of the adapter's serial EEPROM contents.
 * Loaded into acb->eeprom and consulted for per-target and
 * adapter-wide configuration (see eeprom_override / reset_dev_param).
 */
struct NvRamType {
	u8 sub_vendor_id[2];
	u8 sub_sys_id[2];
	u8 sub_class;
	u8 vendor_id[2];
	u8 device_id[2];
	u8 reserved;
	struct NVRamTarget target[DC395x_MAX_SCSI_ID];	/* per-target settings */

	u8 scsi_id;		/* host adapter's own SCSI id */
	u8 channel_cfg;		/* adapter mode (NAC_* flags) */
	/*
	 * On load this is an index into eeprom_index_to_delay_map[];
	 * eeprom_index_to_delay() converts it in place to seconds.
	 */
	u8 delay_time;
	u8 max_tag;		/* tag depth exponent (number of tags = 1<<x) */
	u8 reserved0;
	u8 boot_target;
	u8 boot_lun;
	u8 reserved1;
	u16 reserved2[22];
	u16 cksum;		/* EEPROM checksum word */
};
211
/*
 * Per-request state (SRB): tracks one scsi_cmnd from queueing to
 * completion.  SRBs live in acb->srb_array and are recycled through
 * acb->srb_free_list / the per-DCB waiting and going lists.
 */
struct ScsiReqBlk {
	struct list_head list;		/* node on free/waiting/going list */
	struct DeviceCtlBlk *dcb;	/* owning target/LUN */
	struct scsi_cmnd *cmd;		/* mid-layer command being serviced */

	struct SGentry *segment_x;	/* driver S/G table (SEGMENTX_LEN bytes) */
	dma_addr_t sg_bus_addr;		/* DMA address of segment_x when mapped */

	u8 sg_count;			/* entries used in segment_x */
	u8 sg_index;			/* current entry during the transfer */
	size_t total_xfer_length;	/* total bytes to move (set in build_srb) */
	size_t request_length;		/* snapshot of total_xfer_length at build */

	/* bytes already transferred */
	size_t xferred;

	u16 state;			/* SRB_* state flags */

	u8 msgin_buf[6];		/* inbound SCSI message bytes */
	u8 msgout_buf[6];		/* outbound SCSI message bytes */

	u8 adapter_status;
	u8 target_status;
	u8 msg_count;			/* valid bytes in msgout_buf */
	u8 end_message;

	u8 tag_number;			/* queue tag, or TAG_NONE (255) */
	u8 status;
	u8 retry_count;
	u8 flag;			/* e.g. AUTO_REQSENSE */

	u8 scsi_phase;			/* current bus phase (PH_*) */
};
252
/*
 * Per-target/LUN state (DCB).  Linked on acb->dcb_list and looked up via
 * acb->children[id][lun] (see find_dcb).
 */
struct DeviceCtlBlk {
	struct list_head list;		/* node on acb->dcb_list */
	struct AdapterCtlBlk *acb;	/* back-pointer to the adapter */
	struct list_head srb_going_list;	/* commands issued to the device */
	struct list_head srb_waiting_list;	/* commands queued, not started */

	struct ScsiReqBlk *active_srb;	/* request currently on the bus, if any */
	u32 tag_mask;			/* bitmap of queue-tag numbers in use */

	u16 max_command;		/* max outstanding commands for this LUN */

	u8 target_id;
	u8 target_lun;
	u8 identify_msg;		/* IDENTIFY byte sent at selection */
	u8 dev_mode;			/* NTC_* flags (from EEPROM cfg0) */

	u8 inquiry7;			/* INQUIRY byte 7 (SCSI_INQ_* capability bits) */
	u8 sync_mode;			/* *_NEGO_ENABLE/_DONE, EN_TAG_QUEUEING flags */
	u8 min_nego_period;		/* fastest sync period we will negotiate */
	u8 sync_period;			/* value written to TRM_S1040_SCSI_SYNC */

	u8 sync_offset;			/* negotiated sync offset */
	u8 flag;
	u8 dev_type;
	u8 init_tcq_flag;
};
279
/*
 * Per-adapter state (ACB), stored in the Scsi_Host's hostdata.
 * Guarded by scsi_host->host_lock (see DC395x_LOCK_IO).
 */
struct AdapterCtlBlk {
	struct Scsi_Host *scsi_host;

	unsigned long io_port_base;	/* base for DC395x_read*/DC395x_write* */
	unsigned long io_port_len;

	struct list_head dcb_list;	/* all known DCBs */
	struct DeviceCtlBlk *dcb_run_robin;	/* round-robin cursor (waiting_process_next) */
	struct DeviceCtlBlk *active_dcb;	/* DCB currently owning the bus */

	struct list_head srb_free_list;	/* recycled ScsiReqBlk pool */
	struct ScsiReqBlk *tmp_srb;
	struct timer_list waiting_timer;	/* retry kick for waiting queues */
	struct timer_list selto_timer;

	unsigned long last_reset;	/* jiffies until which cmds are refused */

	u16 srb_count;

	u8 sel_timeout;

	unsigned int irq_level;
	u8 tag_max_num;
	u8 acb_flag;			/* RESET_DETECT/RESET_DONE/RESET_DEV bits */
	u8 gmode2;

	u8 config;			/* HCC_* bits, e.g. HCC_WIDE_CARD */
	u8 lun_chk;
	u8 scan_devices;
	u8 hostid_bit;

	u8 dcb_map[DC395x_MAX_SCSI_ID];	/* per-id bitmap of present LUNs */
	struct DeviceCtlBlk *children[DC395x_MAX_SCSI_ID][32];	/* id/lun -> DCB */

	struct pci_dev *dev;

	u8 msg_len;

	struct ScsiReqBlk srb_array[DC395x_MAX_SRB_CNT];	/* SRB pool storage */
	struct ScsiReqBlk srb;

	struct NvRamType eeprom;	/* eeprom settings for this adapter */
};
323
324
325
326
327
328static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
329 u16 *pscsi_status);
330static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
331 u16 *pscsi_status);
332static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
333 u16 *pscsi_status);
334static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
335 u16 *pscsi_status);
336static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
337 u16 *pscsi_status);
338static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
339 u16 *pscsi_status);
340static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
341 u16 *pscsi_status);
342static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
343 u16 *pscsi_status);
344static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
345 u16 *pscsi_status);
346static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
347 u16 *pscsi_status);
348static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
349 u16 *pscsi_status);
350static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
351 u16 *pscsi_status);
352static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
353 u16 *pscsi_status);
354static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
355 u16 *pscsi_status);
356static void set_basic_config(struct AdapterCtlBlk *acb);
357static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
358 struct ScsiReqBlk *srb);
359static void reset_scsi_bus(struct AdapterCtlBlk *acb);
360static void data_io_transfer(struct AdapterCtlBlk *acb,
361 struct ScsiReqBlk *srb, u16 io_dir);
362static void disconnect(struct AdapterCtlBlk *acb);
363static void reselect(struct AdapterCtlBlk *acb);
364static u8 start_scsi(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
365 struct ScsiReqBlk *srb);
366static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
367 struct ScsiReqBlk *srb);
368static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
369 struct ScsiReqBlk *srb);
370static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_code,
371 struct scsi_cmnd *cmd, u8 force);
372static void scsi_reset_detect(struct AdapterCtlBlk *acb);
373static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb);
374static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
375 struct ScsiReqBlk *srb);
376static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
377 struct ScsiReqBlk *srb);
378static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
379 struct ScsiReqBlk *srb);
380static void set_xfer_rate(struct AdapterCtlBlk *acb,
381 struct DeviceCtlBlk *dcb);
382static void waiting_timeout(struct timer_list *t);
383
384
385
386
387
static u16 current_sync_offset = 0;	/* NOTE(review): file-scope state; users not visible in this chunk — presumably serialized by the host lock, confirm */
389
/*
 * Dispatch table indexed by SCSI bus phase (0-7).  Entries 4 and 5
 * (reserved phases) are no-ops.  Handlers share the signature
 * (acb, srb, u16 *pscsi_status); the table uses void* so callers cast.
 */
static void *dc395x_scsi_phase0[] = {
	data_out_phase0,/* phase:0 */
	data_in_phase0,	/* phase:1 */
	command_phase0,	/* phase:2 */
	status_phase0,	/* phase:3 */
	nop0,		/* phase:4 (reserved) */
	nop0,		/* phase:5 (reserved) */
	msgout_phase0,	/* phase:6 */
	msgin_phase0,	/* phase:7 */
};
400
/* Companion dispatch table to dc395x_scsi_phase0 — same indexing by phase. */
static void *dc395x_scsi_phase1[] = {
	data_out_phase1,/* phase:0 */
	data_in_phase1,	/* phase:1 */
	command_phase1,	/* phase:2 */
	status_phase1,	/* phase:3 */
	nop1,		/* phase:4 (reserved) */
	nop1,		/* phase:5 (reserved) */
	msgout_phase1,	/* phase:6 */
	msgin_phase1,	/* phase:7 */
};
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
/*
 * Sync negotiation tables, indexed by the 3-bit EEPROM speed index (0-7).
 * clock_period[] feeds dcb->min_nego_period (see reset_dev_param);
 * clock_speed[] appears to be the matching bus speed in 100 kHz units
 * (200 == 20.0 MHz, consistent with the max_speed parameter description).
 */
static u8 clock_period[] = { 12, 18, 25, 31, 37, 43, 50, 62 };
static u16 clock_speed[] = { 200, 133, 100, 80, 67, 58, 50, 40 };
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451#define CFG_ADAPTER_ID 0
452#define CFG_MAX_SPEED 1
453#define CFG_DEV_MODE 2
454#define CFG_ADAPTER_MODE 3
455#define CFG_TAGS 4
456#define CFG_RESET_DELAY 5
457
458#define CFG_NUM 6
459
460
461
462
463
464
465#define CFG_PARAM_UNSET -1
466
467
468
469
470
/* One configurable module parameter and its bounds (see cfg_data[]). */
struct ParameterData {
	int value;		/* supplied value, or CFG_PARAM_UNSET */
	int min;		/* lowest accepted value */
	int max;		/* highest accepted value */
	int def;		/* default applied when value is out of range */
	int safe;		/* conservative value used with the "safe" option */
};
478static struct ParameterData cfg_data[] = {
479 {
480 CFG_PARAM_UNSET,
481 0,
482 15,
483 7,
484 7
485 },
486 {
487 CFG_PARAM_UNSET,
488 0,
489 7,
490 1,
491 4,
492 },
493 {
494 CFG_PARAM_UNSET,
495 0,
496 0x3f,
497 NTC_DO_PARITY_CHK | NTC_DO_DISCONNECT | NTC_DO_SYNC_NEGO |
498 NTC_DO_WIDE_NEGO | NTC_DO_TAG_QUEUEING |
499 NTC_DO_SEND_START,
500 NTC_DO_PARITY_CHK | NTC_DO_SEND_START
501 },
502 {
503 CFG_PARAM_UNSET,
504 0,
505 0x2f,
506 NAC_SCANLUN |
507 NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET
508 ,
509 NAC_GT2DRIVES | NAC_GREATER_1G | NAC_POWERON_SCSI_RESET | 0x08
510 },
511 {
512 CFG_PARAM_UNSET,
513 0,
514 5,
515 3,
516 2,
517 },
518 {
519 CFG_PARAM_UNSET,
520 0,
521 180,
522 1,
523 10,
524 }
525};
526
527
528
529
530
531
532
/* "safe" switches every parameter to its conservative .safe value. */
static bool use_safe_settings = 0;
module_param_named(safe, use_safe_settings, bool, 0);
MODULE_PARM_DESC(safe, "Use safe and slow settings only. Default: false");

/* Each parameter below fills one cfg_data[] slot; unset = CFG_PARAM_UNSET. */
module_param_named(adapter_id, cfg_data[CFG_ADAPTER_ID].value, int, 0);
MODULE_PARM_DESC(adapter_id, "Adapter SCSI ID. Default 7 (0-15)");

module_param_named(max_speed, cfg_data[CFG_MAX_SPEED].value, int, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed. Default 1 (0-7) Speeds: 0=20, 1=13.3, 2=10, 3=8, 4=6.7, 5=5.8, 6=5, 7=4 Mhz");

module_param_named(dev_mode, cfg_data[CFG_DEV_MODE].value, int, 0);
MODULE_PARM_DESC(dev_mode, "Device mode.");

module_param_named(adapter_mode, cfg_data[CFG_ADAPTER_MODE].value, int, 0);
MODULE_PARM_DESC(adapter_mode, "Adapter mode.");

module_param_named(tags, cfg_data[CFG_TAGS].value, int, 0);
MODULE_PARM_DESC(tags, "Number of tags (1<<x). Default 3 (0-5)");

module_param_named(reset_delay, cfg_data[CFG_RESET_DELAY].value, int, 0);
MODULE_PARM_DESC(reset_delay, "Reset delay in seconds. Default 1 (0-180)");
555
556
557
558
559
560
561static void set_safe_settings(void)
562{
563 if (use_safe_settings)
564 {
565 int i;
566
567 dprintkl(KERN_INFO, "Using safe settings.\n");
568 for (i = 0; i < CFG_NUM; i++)
569 {
570 cfg_data[i].value = cfg_data[i].safe;
571 }
572 }
573}
574
575
576
577
578
579
580static void fix_settings(void)
581{
582 int i;
583
584 dprintkdbg(DBG_1,
585 "setup: AdapterId=%08x MaxSpeed=%08x DevMode=%08x "
586 "AdapterMode=%08x Tags=%08x ResetDelay=%08x\n",
587 cfg_data[CFG_ADAPTER_ID].value,
588 cfg_data[CFG_MAX_SPEED].value,
589 cfg_data[CFG_DEV_MODE].value,
590 cfg_data[CFG_ADAPTER_MODE].value,
591 cfg_data[CFG_TAGS].value,
592 cfg_data[CFG_RESET_DELAY].value);
593 for (i = 0; i < CFG_NUM; i++)
594 {
595 if (cfg_data[i].value < cfg_data[i].min
596 || cfg_data[i].value > cfg_data[i].max)
597 cfg_data[i].value = cfg_data[i].def;
598 }
599}
600
601
602
603
604
605
606
/* Reset-delay seconds for each 3-bit EEPROM delay index (0-7). */
static char eeprom_index_to_delay_map[] =
	{ 1, 3, 5, 10, 16, 30, 60, 120 };
609
610
611
612
613
614
615
616
/*
 * Convert eeprom->delay_time in place from an EEPROM table index to a
 * delay in seconds (inverse of delay_to_eeprom_index).
 */
static void eeprom_index_to_delay(struct NvRamType *eeprom)
{
	eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time];
}
621
622
623
624
625
626
627
628
629
630static int delay_to_eeprom_index(int delay)
631{
632 u8 idx = 0;
633 while (idx < 7 && eeprom_index_to_delay_map[idx] < delay)
634 idx++;
635 return idx;
636}
637
638
639
640
641
642
643
644
645
646static void eeprom_override(struct NvRamType *eeprom)
647{
648 u8 id;
649
650
651 if (cfg_data[CFG_ADAPTER_ID].value != CFG_PARAM_UNSET)
652 eeprom->scsi_id = (u8)cfg_data[CFG_ADAPTER_ID].value;
653
654 if (cfg_data[CFG_ADAPTER_MODE].value != CFG_PARAM_UNSET)
655 eeprom->channel_cfg = (u8)cfg_data[CFG_ADAPTER_MODE].value;
656
657 if (cfg_data[CFG_RESET_DELAY].value != CFG_PARAM_UNSET)
658 eeprom->delay_time = delay_to_eeprom_index(
659 cfg_data[CFG_RESET_DELAY].value);
660
661 if (cfg_data[CFG_TAGS].value != CFG_PARAM_UNSET)
662 eeprom->max_tag = (u8)cfg_data[CFG_TAGS].value;
663
664
665 for (id = 0; id < DC395x_MAX_SCSI_ID; id++) {
666 if (cfg_data[CFG_DEV_MODE].value != CFG_PARAM_UNSET)
667 eeprom->target[id].cfg0 =
668 (u8)cfg_data[CFG_DEV_MODE].value;
669
670 if (cfg_data[CFG_MAX_SPEED].value != CFG_PARAM_UNSET)
671 eeprom->target[id].period =
672 (u8)cfg_data[CFG_MAX_SPEED].value;
673
674 }
675}
676
677
678
679
680
681static unsigned int list_size(struct list_head *head)
682{
683 unsigned int count = 0;
684 struct list_head *pos;
685 list_for_each(pos, head)
686 count++;
687 return count;
688}
689
690
691static struct DeviceCtlBlk *dcb_get_next(struct list_head *head,
692 struct DeviceCtlBlk *pos)
693{
694 int use_next = 0;
695 struct DeviceCtlBlk* next = NULL;
696 struct DeviceCtlBlk* i;
697
698 if (list_empty(head))
699 return NULL;
700
701
702 list_for_each_entry(i, head, list)
703 if (use_next) {
704 next = i;
705 break;
706 } else if (i == pos) {
707 use_next = 1;
708 }
709
710 if (!next)
711 list_for_each_entry(i, head, list) {
712 next = i;
713 break;
714 }
715
716 return next;
717}
718
719
720static void free_tag(struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
721{
722 if (srb->tag_number < 255) {
723 dcb->tag_mask &= ~(1 << srb->tag_number);
724 srb->tag_number = 255;
725 }
726}
727
728
729
730static inline struct ScsiReqBlk *find_cmd(struct scsi_cmnd *cmd,
731 struct list_head *head)
732{
733 struct ScsiReqBlk *i;
734 list_for_each_entry(i, head, list)
735 if (i->cmd == cmd)
736 return i;
737 return NULL;
738}
739
740
741static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to)
742{
743 if (timer_pending(&acb->waiting_timer))
744 return;
745 if (time_before(jiffies + to, acb->last_reset - HZ / 2))
746 acb->waiting_timer.expires =
747 acb->last_reset - HZ / 2 + 1;
748 else
749 acb->waiting_timer.expires = jiffies + to + 1;
750 add_timer(&acb->waiting_timer);
751}
752
753
754
/*
 * Round-robin scheduler for the per-DCB waiting queues: starting at the
 * DCB after the last one served (acb->dcb_run_robin), find a device with
 * queued work and spare command slots and start its first waiting SRB.
 * Bails out if the bus is busy (active_dcb) or a reset is in progress.
 */
static void waiting_process_next(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *start = NULL;
	struct DeviceCtlBlk *pos;
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	struct list_head *dcb_list_head = &acb->dcb_list;

	if (acb->active_dcb
	    || (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV)))
		return;

	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	if (list_empty(dcb_list_head))
		return;

	/*
	 * Locate the round-robin cursor on the list; if it is stale (no
	 * longer on the list), restart from the first DCB.
	 */
	list_for_each_entry(dcb, dcb_list_head, list)
		if (dcb == acb->dcb_run_robin) {
			start = dcb;
			break;
		}
	if (!start) {
		/* cursor invalid: begin at the head of the DCB list */
		start = list_entry(dcb_list_head->next, typeof(*start), list);
		acb->dcb_run_robin = start;
	}

	/*
	 * Walk at most one full lap.  The cursor is advanced *before* the
	 * attempt so the next call starts at the following device even if
	 * we start a command here.
	 */
	pos = start;
	do {
		struct list_head *waiting_list_head = &pos->srb_waiting_list;

		/* advance the cursor for the next scheduling pass */
		acb->dcb_run_robin = dcb_get_next(dcb_list_head,
						  acb->dcb_run_robin);

		if (list_empty(waiting_list_head) ||
		    pos->max_command <= list_size(&pos->srb_going_list)) {
			/* nothing to do or device already saturated */
			pos = dcb_get_next(dcb_list_head, pos);
		} else {
			srb = list_entry(waiting_list_head->next,
					 struct ScsiReqBlk, list);

			/* try to put the SRB on the bus; 0 == started */
			if (!start_scsi(acb, pos, srb))
				list_move(&srb->list, &pos->srb_going_list);
			else
				waiting_set_timer(acb, HZ/50);
			break;
		}
	} while (pos != start);
}
818
819
820
821static void waiting_timeout(struct timer_list *t)
822{
823 unsigned long flags;
824 struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer);
825 dprintkdbg(DBG_1,
826 "waiting_timeout: Queue woken up by timer. acb=%p\n", acb);
827 DC395x_LOCK_IO(acb->scsi_host, flags);
828 waiting_process_next(acb);
829 DC395x_UNLOCK_IO(acb->scsi_host, flags);
830}
831
832
833
/* Look up the DCB for a target id/lun; NULL if none was created. */
static struct DeviceCtlBlk *find_dcb(struct AdapterCtlBlk *acb, u8 id, u8 lun)
{
	return acb->children[id][lun];
}
838
839
840
841static void send_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
842{
843 struct DeviceCtlBlk *dcb = srb->dcb;
844
845 if (dcb->max_command <= list_size(&dcb->srb_going_list) ||
846 acb->active_dcb ||
847 (acb->acb_flag & (RESET_DETECT + RESET_DONE + RESET_DEV))) {
848 list_add_tail(&srb->list, &dcb->srb_waiting_list);
849 waiting_process_next(acb);
850 return;
851 }
852
853 if (!start_scsi(acb, dcb, srb)) {
854 list_add_tail(&srb->list, &dcb->srb_going_list);
855 } else {
856 list_add(&srb->list, &dcb->srb_waiting_list);
857 waiting_set_timer(acb, HZ / 50);
858 }
859}
860
861
862static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
863 struct ScsiReqBlk *srb)
864{
865 int nseg;
866 enum dma_data_direction dir = cmd->sc_data_direction;
867 dprintkdbg(DBG_0, "build_srb: (0x%p) <%02i-%i>\n",
868 cmd, dcb->target_id, dcb->target_lun);
869
870 srb->dcb = dcb;
871 srb->cmd = cmd;
872 srb->sg_count = 0;
873 srb->total_xfer_length = 0;
874 srb->sg_bus_addr = 0;
875 srb->sg_index = 0;
876 srb->adapter_status = 0;
877 srb->target_status = 0;
878 srb->msg_count = 0;
879 srb->status = 0;
880 srb->flag = 0;
881 srb->state = 0;
882 srb->retry_count = 0;
883 srb->tag_number = TAG_NONE;
884 srb->scsi_phase = PH_BUS_FREE;
885 srb->end_message = 0;
886
887 nseg = scsi_dma_map(cmd);
888 BUG_ON(nseg < 0);
889
890 if (dir == DMA_NONE || !nseg) {
891 dprintkdbg(DBG_0,
892 "build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
893 cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd),
894 srb->segment_x[0].address);
895 } else {
896 int i;
897 u32 reqlen = scsi_bufflen(cmd);
898 struct scatterlist *sg;
899 struct SGentry *sgp = srb->segment_x;
900
901 srb->sg_count = nseg;
902
903 dprintkdbg(DBG_0,
904 "build_srb: [n] len=%d buf=%p use_sg=%d segs=%d\n",
905 reqlen, scsi_sglist(cmd), scsi_sg_count(cmd),
906 srb->sg_count);
907
908 scsi_for_each_sg(cmd, sg, srb->sg_count, i) {
909 u32 busaddr = (u32)sg_dma_address(sg);
910 u32 seglen = (u32)sg->length;
911 sgp[i].address = busaddr;
912 sgp[i].length = seglen;
913 srb->total_xfer_length += seglen;
914 }
915 sgp += srb->sg_count - 1;
916
917
918
919
920
921 if (srb->total_xfer_length > reqlen) {
922 sgp->length -= (srb->total_xfer_length - reqlen);
923 srb->total_xfer_length = reqlen;
924 }
925
926
927 if (dcb->sync_period & WIDE_SYNC &&
928 srb->total_xfer_length % 2) {
929 srb->total_xfer_length++;
930 sgp->length++;
931 }
932
933 srb->sg_bus_addr = dma_map_single(&dcb->acb->dev->dev,
934 srb->segment_x, SEGMENTX_LEN, DMA_TO_DEVICE);
935
936 dprintkdbg(DBG_SG, "build_srb: [n] map sg %p->%08x(%05x)\n",
937 srb->segment_x, srb->sg_bus_addr, SEGMENTX_LEN);
938 }
939
940 srb->request_length = srb->total_xfer_length;
941}
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
/*
 * queuecommand (locked variant, wrapped by DEF_SCSI_QCMD below): validate
 * the destination, grab a free SRB, build it and dispatch it.  Returns 0
 * on accept/complete, 1 to ask the mid-layer to retry when no SRB is free.
 */
static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	struct AdapterCtlBlk *acb =
	    (struct AdapterCtlBlk *)cmd->device->host->hostdata;
	dprintkdbg(DBG_0, "queue_command: (0x%p) <%02i-%i> cmnd=0x%02x\n",
		cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]);

	/* preset the failure status; every "goto complete" reports this */
	set_host_byte(cmd, DID_BAD_TARGET);

	/* reject addresses outside the host's advertised limits */
	if (cmd->device->id >= acb->scsi_host->max_id ||
	    cmd->device->lun >= acb->scsi_host->max_lun ||
	    cmd->device->lun >31) {
		goto complete;
	}

	/* reject LUNs we did not discover during scan (dcb_map bitmap) */
	if (!(acb->dcb_map[cmd->device->id] & (1 << cmd->device->lun))) {
		dprintkl(KERN_INFO, "queue_command: Ignore target <%02i-%i>\n",
			cmd->device->id, (u8)cmd->device->lun);
		goto complete;
	}

	/* the DCB should exist if the dcb_map bit was set */
	dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
	if (!dcb) {
		/* inconsistent internal state — should not happen */
		dprintkl(KERN_ERR, "queue_command: No such device <%02i-%i>",
			cmd->device->id, (u8)cmd->device->lun);
		goto complete;
	}

	/* command accepted: switch to the success status */
	cmd->scsi_done = done;
	set_host_byte(cmd, DID_OK);
	set_status_byte(cmd, SAM_STAT_GOOD);

	srb = list_first_entry_or_null(&acb->srb_free_list,
			struct ScsiReqBlk, list);
	if (!srb) {
		/*
		 * No free SRB: returning 1 makes the mid-layer requeue
		 * the command and try again later.
		 */
		dprintkdbg(DBG_0, "queue_command: No free srb's\n");
		return 1;
	}
	list_del(&srb->list);

	build_srb(cmd, dcb, srb);

	if (!list_empty(&dcb->srb_waiting_list)) {
		/* preserve ordering behind already-waiting commands */
		list_add_tail(&srb->list, &dcb->srb_waiting_list);
		waiting_process_next(acb);
	} else {
		/* no queue for this LUN: dispatch directly */
		send_srb(acb, srb);
	}
	dprintkdbg(DBG_1, "queue_command: (0x%p) done\n", cmd);
	return 0;

complete:
	/*
	 * Complete the command immediately with the DID_BAD_TARGET host
	 * byte set above; returning 0 tells the mid-layer it was consumed.
	 */
	done(cmd);
	return 0;
}
1038
1039static DEF_SCSI_QCMD(dc395x_queue_command)
1040
/*
 * Diagnostic dump: print the state of the active SRB (if any) plus the
 * chip's SCSI, DMA and general register blocks and the PCI status word.
 * Reads registers only — no side effects on the hardware state machine
 * beyond the register reads themselves.
 */
static void dump_register_info(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb)
{
	u16 pstat;
	struct pci_dev *dev = acb->dev;
	pci_read_config_word(dev, PCI_STATUS, &pstat);
	/* fall back to the currently active DCB/SRB when none was passed */
	if (!dcb)
		dcb = acb->active_dcb;
	if (!srb && dcb)
		srb = dcb->active_srb;
	if (srb) {
		if (!srb->cmd)
			dprintkl(KERN_INFO, "dump: srb=%p cmd=%p OOOPS!\n",
				srb, srb->cmd);
		else
			dprintkl(KERN_INFO, "dump: srb=%p cmd=%p "
				 "cmnd=0x%02x <%02i-%i>\n",
				srb, srb->cmd,
				srb->cmd->cmnd[0], srb->cmd->device->id,
				(u8)srb->cmd->device->lun);
		printk("  sglist=%p cnt=%i idx=%i len=%zu\n",
		       srb->segment_x, srb->sg_count, srb->sg_index,
		       srb->total_xfer_length);
		printk("  state=0x%04x status=0x%02x phase=0x%02x (%sconn.)\n",
		       srb->state, srb->status, srb->scsi_phase,
		       (acb->active_dcb) ? "" : "not");
	}
	dprintkl(KERN_INFO, "dump: SCSI{status=0x%04x fifocnt=0x%02x "
		"signals=0x%02x irqstat=0x%02x sync=0x%02x target=0x%02x "
		"rselid=0x%02x ctr=0x%08x irqen=0x%02x config=0x%04x "
		"config2=0x%02x cmd=0x%02x selto=0x%02x}\n",
		DC395x_read16(acb, TRM_S1040_SCSI_STATUS),
		DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL),
		DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS),
		DC395x_read8(acb, TRM_S1040_SCSI_SYNC),
		DC395x_read8(acb, TRM_S1040_SCSI_TARGETID),
		DC395x_read8(acb, TRM_S1040_SCSI_IDMSG),
		DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
		DC395x_read8(acb, TRM_S1040_SCSI_INTEN),
		DC395x_read16(acb, TRM_S1040_SCSI_CONFIG0),
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG2),
		DC395x_read8(acb, TRM_S1040_SCSI_COMMAND),
		DC395x_read8(acb, TRM_S1040_SCSI_TIMEOUT));
	dprintkl(KERN_INFO, "dump: DMA{cmd=0x%04x fifocnt=0x%02x fstat=0x%02x "
		"irqstat=0x%02x irqen=0x%02x cfg=0x%04x tctr=0x%08x "
		"ctctr=0x%08x addr=0x%08x:0x%08x}\n",
		DC395x_read16(acb, TRM_S1040_DMA_COMMAND),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
		DC395x_read8(acb, TRM_S1040_DMA_STATUS),
		DC395x_read8(acb, TRM_S1040_DMA_INTEN),
		DC395x_read16(acb, TRM_S1040_DMA_CONFIG),
		DC395x_read32(acb, TRM_S1040_DMA_XCNT),
		DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
		DC395x_read32(acb, TRM_S1040_DMA_XHIGHADDR),
		DC395x_read32(acb, TRM_S1040_DMA_XLOWADDR));
	dprintkl(KERN_INFO, "dump: gen{gctrl=0x%02x gstat=0x%02x gtmr=0x%02x} "
		"pci{status=0x%04x}\n",
		DC395x_read8(acb, TRM_S1040_GEN_CONTROL),
		DC395x_read8(acb, TRM_S1040_GEN_STATUS),
		DC395x_read8(acb, TRM_S1040_GEN_TIMER),
		pstat);
}
1105
1106
/*
 * Clear the chip's SCSI FIFO.  With DBG_FIFO debugging compiled in, also
 * report how many bytes were discarded (@txt identifies the call site;
 * bit 6 of the FIFO count register appears to flag "empty", low 6 bits
 * are the count — based on the masks used here, confirm against the
 * TRM-S1040 datasheet).
 */
static inline void clear_fifo(struct AdapterCtlBlk *acb, char *txt)
{
#if debug_enabled(DBG_FIFO)
	u8 lines = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
	u8 fifocnt = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
	if (!(fifocnt & 0x40))
		dprintkdbg(DBG_FIFO,
			"clear_fifo: (%i bytes) on phase %02x in %s\n",
			fifocnt & 0x3f, lines, txt);
#endif
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRFIFO);
}
1119
1120
/*
 * After a bus reset, forget all negotiated transfer agreements and
 * restore every DCB's parameters from the EEPROM settings; sync/wide
 * will be renegotiated on the next commands.
 */
static void reset_dev_param(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb;
	struct NvRamType *eeprom = &acb->eeprom;
	dprintkdbg(DBG_0, "reset_dev_param: acb=%p\n", acb);

	list_for_each_entry(dcb, &acb->dcb_list, list) {
		u8 period_index;

		/* negotiation results are void after a reset */
		dcb->sync_mode &= ~(SYNC_NEGO_DONE + WIDE_NEGO_DONE);
		dcb->sync_period = 0;
		dcb->sync_offset = 0;

		dcb->dev_mode = eeprom->target[dcb->target_id].cfg0;
		period_index = eeprom->target[dcb->target_id].period & 0x07;
		dcb->min_nego_period = clock_period[period_index];
		/* wide only if both the device config and the card allow it */
		if (!(dcb->dev_mode & NTC_DO_WIDE_NEGO)
		    || !(acb->config & HCC_WIDE_CARD))
			dcb->sync_mode &= ~WIDE_NEGO_ENABLE;
	}
}
1142
1143
1144
1145
1146
1147
1148
/*
 * Error-handler bus reset (caller holds the host lock): quiesce the
 * chip, pulse SCSI reset, clear FIFOs, reprogram the basic config,
 * fail all in-flight commands with DID_RESET and restart the queues.
 * Always returns SUCCESS.
 */
static int __dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterCtlBlk *acb =
		(struct AdapterCtlBlk *)cmd->device->host->hostdata;
	dprintkl(KERN_INFO,
		"eh_bus_reset: (0%p) target=<%02i-%i> cmd=%p\n",
		cmd, cmd->device->id, (u8)cmd->device->lun, cmd);

	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	/* mask interrupts and reset both the SCSI and DMA modules */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);

	reset_scsi_bus(acb);
	udelay(500);

	/* refuse new commands until the configured settle time elapses */
	acb->last_reset =
	    jiffies + 3 * HZ / 2 +
	    HZ * acb->eeprom.delay_time;

	/* flush both FIFOs and drop any latched interrupt status */
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
	clear_fifo(acb, "eh_bus_reset");
	/* reading INTSTATUS acknowledges/clears pending SCSI interrupts */
	DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	set_basic_config(acb);

	reset_dev_param(acb);
	doing_srb_done(acb, DID_RESET, cmd, 0);
	acb->active_dcb = NULL;
	acb->acb_flag = 0;	/* clear RESET_DETECT/RESET_DONE/RESET_DEV */
	waiting_process_next(acb);

	return SUCCESS;
}
1194
1195static int dc395x_eh_bus_reset(struct scsi_cmnd *cmd)
1196{
1197 int rc;
1198
1199 spin_lock_irq(cmd->device->host->host_lock);
1200 rc = __dc395x_eh_bus_reset(cmd);
1201 spin_unlock_irq(cmd->device->host->host_lock);
1202
1203 return rc;
1204}
1205
1206
1207
1208
1209
1210
/*
 * Error-handler abort: a command still on the waiting list can be
 * unmapped, recycled and reported SUCCESS/DID_ABORT; a command already
 * in progress on the bus is not aborted here and returns FAILED.
 */
static int dc395x_eh_abort(struct scsi_cmnd *cmd)
{
	struct AdapterCtlBlk *acb =
		(struct AdapterCtlBlk *)cmd->device->host->hostdata;
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	dprintkl(KERN_INFO, "eh_abort: (0x%p) target=<%02i-%i> cmd=%p\n",
		cmd, cmd->device->id, (u8)cmd->device->lun, cmd);

	dcb = find_dcb(acb, cmd->device->id, cmd->device->lun);
	if (!dcb) {
		dprintkl(KERN_DEBUG, "eh_abort: No such device\n");
		return FAILED;
	}

	/* easy case: command never reached the bus */
	srb = find_cmd(cmd, &dcb->srb_waiting_list);
	if (srb) {
		list_del(&srb->list);
		pci_unmap_srb_sense(acb, srb);
		pci_unmap_srb(acb, srb);
		free_tag(dcb, srb);
		list_add_tail(&srb->list, &acb->srb_free_list);
		dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
		set_host_byte(cmd, DID_ABORT);
		return SUCCESS;
	}
	srb = find_cmd(cmd, &dcb->srb_going_list);
	if (srb) {
		dprintkl(KERN_DEBUG, "eh_abort: Command in progress\n");
		/* in-flight abort is not implemented — fall through to FAILED */
	} else {
		dprintkl(KERN_DEBUG, "eh_abort: Command not found\n");
	}
	return FAILED;
}
1250
1251
1252
/*
 * Append a Synchronous Data Transfer Request (SDTR) message to the
 * SRB's outgoing message buffer and mark sync negotiation in progress.
 * Bails out if the buffer already holds more than the IDENTIFY byte.
 */
static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	u8 *ptr = srb->msgout_buf + srb->msg_count;
	if (srb->msg_count > 1) {
		dprintkl(KERN_INFO,
			"build_sdtr: msgout_buf BUSY (%i: %02x %02x)\n",
			srb->msg_count, srb->msgout_buf[0],
			srb->msgout_buf[1]);
		return;
	}
	if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO)) {
		/* sync disabled for this target: request async (offset 0) */
		dcb->sync_offset = 0;
		dcb->min_nego_period = 200 >> 2;
	} else if (dcb->sync_offset == 0)
		dcb->sync_offset = SYNC_NEGO_OFFSET;

	srb->msg_count += spi_populate_sync_msg(ptr, dcb->min_nego_period,
						dcb->sync_offset);
	srb->state |= SRB_DO_SYNC_NEGO;
}
1274
1275
1276
/*
 * Append a Wide Data Transfer Request (WDTR) message to the SRB's
 * outgoing message buffer and mark wide negotiation in progress.
 * Requests 16-bit only when both the target config and the card allow it.
 */
static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	u8 wide = ((dcb->dev_mode & NTC_DO_WIDE_NEGO) &
		   (acb->config & HCC_WIDE_CARD)) ? 1 : 0;
	u8 *ptr = srb->msgout_buf + srb->msg_count;
	if (srb->msg_count > 1) {
		dprintkl(KERN_INFO,
			"build_wdtr: msgout_buf BUSY (%i: %02x %02x)\n",
			srb->msg_count, srb->msgout_buf[0],
			srb->msgout_buf[1]);
		return;
	}
	srb->msg_count += spi_populate_width_msg(ptr, wide);
	srb->state |= SRB_DO_WIDE_NEGO;
}
1293
1294
#if 0
/*
 * NOTE(review): dead code — this selection-timeout fallback is compiled
 * out via "#if 0" and still uses the pre-timer_setup() API (unsigned
 * long data argument).  Kept for reference only; it would need porting
 * to struct timer_list callbacks before being re-enabled.
 */
void selection_timeout_missed(unsigned long ptr);

static void selto_timer(struct AdapterCtlBlk *acb)
{
	if (timer_pending(&acb->selto_timer))
		return;
	acb->selto_timer.function = selection_timeout_missed;
	acb->selto_timer.data = (unsigned long) acb;
	if (time_before
	    (jiffies + HZ, acb->last_reset + HZ / 2))
		acb->selto_timer.expires =
		    acb->last_reset + HZ / 2 + 1;
	else
		acb->selto_timer.expires = jiffies + HZ + 1;
	add_timer(&acb->selto_timer);
}


void selection_timeout_missed(unsigned long ptr)
{
	unsigned long flags;
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr;
	struct ScsiReqBlk *srb;
	dprintkl(KERN_DEBUG, "Chip forgot to produce SelTO IRQ!\n");
	if (!acb->active_dcb || !acb->active_dcb->active_srb) {
		dprintkl(KERN_DEBUG, "... but no cmd pending? Oops!\n");
		return;
	}
	DC395x_LOCK_IO(acb->scsi_host, flags);
	srb = acb->active_dcb->active_srb;
	disconnect(acb);
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
#endif
1332
1333
1334static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
1335 struct ScsiReqBlk* srb)
1336{
1337 u16 __maybe_unused s_stat2, return_code;
1338 u8 s_stat, scsicommand, i, identify_message;
1339 u8 *ptr;
1340 dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> srb=%p\n",
1341 dcb->target_id, dcb->target_lun, srb);
1342
1343 srb->tag_number = TAG_NONE;
1344
1345 s_stat = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
1346 s_stat2 = 0;
1347 s_stat2 = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
1348#if 1
1349 if (s_stat & 0x20 ) {
1350 dprintkdbg(DBG_KG, "start_scsi: (0x%p) BUSY %02x %04x\n",
1351 s_stat, s_stat2);
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363 return 1;
1364 }
1365#endif
1366 if (acb->active_dcb) {
1367 dprintkl(KERN_DEBUG, "start_scsi: (0x%p) Attempt to start a"
1368 "command while another command (0x%p) is active.",
1369 srb->cmd,
1370 acb->active_dcb->active_srb ?
1371 acb->active_dcb->active_srb->cmd : 0);
1372 return 1;
1373 }
1374 if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
1375 dprintkdbg(DBG_KG, "start_scsi: (0x%p) Failed (busy)\n", srb->cmd);
1376 return 1;
1377 }
1378
1379
1380 if (time_before(jiffies, acb->last_reset - HZ / 2)) {
1381 dprintkdbg(DBG_KG, "start_scsi: Refuse cmds (reset wait)\n");
1382 return 1;
1383 }
1384
1385
1386 clear_fifo(acb, "start_scsi");
1387 DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
1388 DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
1389 DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
1390 DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
1391 srb->scsi_phase = PH_BUS_FREE;
1392
1393 identify_message = dcb->identify_msg;
1394
1395
1396 if (srb->flag & AUTO_REQSENSE)
1397 identify_message &= 0xBF;
1398
1399 if (((srb->cmd->cmnd[0] == INQUIRY)
1400 || (srb->cmd->cmnd[0] == REQUEST_SENSE)
1401 || (srb->flag & AUTO_REQSENSE))
1402 && (((dcb->sync_mode & WIDE_NEGO_ENABLE)
1403 && !(dcb->sync_mode & WIDE_NEGO_DONE))
1404 || ((dcb->sync_mode & SYNC_NEGO_ENABLE)
1405 && !(dcb->sync_mode & SYNC_NEGO_DONE)))
1406 && (dcb->target_lun == 0)) {
1407 srb->msgout_buf[0] = identify_message;
1408 srb->msg_count = 1;
1409 scsicommand = SCMD_SEL_ATNSTOP;
1410 srb->state = SRB_MSGOUT;
1411#ifndef SYNC_FIRST
1412 if (dcb->sync_mode & WIDE_NEGO_ENABLE
1413 && dcb->inquiry7 & SCSI_INQ_WBUS16) {
1414 build_wdtr(acb, dcb, srb);
1415 goto no_cmd;
1416 }
1417#endif
1418 if (dcb->sync_mode & SYNC_NEGO_ENABLE
1419 && dcb->inquiry7 & SCSI_INQ_SYNC) {
1420 build_sdtr(acb, dcb, srb);
1421 goto no_cmd;
1422 }
1423 if (dcb->sync_mode & WIDE_NEGO_ENABLE
1424 && dcb->inquiry7 & SCSI_INQ_WBUS16) {
1425 build_wdtr(acb, dcb, srb);
1426 goto no_cmd;
1427 }
1428 srb->msg_count = 0;
1429 }
1430
1431 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, identify_message);
1432
1433 scsicommand = SCMD_SEL_ATN;
1434 srb->state = SRB_START_;
1435#ifndef DC395x_NO_TAGQ
1436 if ((dcb->sync_mode & EN_TAG_QUEUEING)
1437 && (identify_message & 0xC0)) {
1438
1439 u32 tag_mask = 1;
1440 u8 tag_number = 0;
1441 while (tag_mask & dcb->tag_mask
1442 && tag_number < dcb->max_command) {
1443 tag_mask = tag_mask << 1;
1444 tag_number++;
1445 }
1446 if (tag_number >= dcb->max_command) {
1447 dprintkl(KERN_WARNING, "start_scsi: (0x%p) "
1448 "Out of tags target=<%02i-%i>)\n",
1449 srb->cmd, srb->cmd->device->id,
1450 (u8)srb->cmd->device->lun);
1451 srb->state = SRB_READY;
1452 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
1453 DO_HWRESELECT);
1454 return 1;
1455 }
1456
1457 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SIMPLE_QUEUE_TAG);
1458 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number);
1459 dcb->tag_mask |= tag_mask;
1460 srb->tag_number = tag_number;
1461 scsicommand = SCMD_SEL_ATN3;
1462 srb->state = SRB_START_;
1463 }
1464#endif
1465
1466
1467 dprintkdbg(DBG_KG, "start_scsi: (0x%p) <%02i-%i> cmnd=0x%02x tag=%i\n",
1468 srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
1469 srb->cmd->cmnd[0], srb->tag_number);
1470 if (srb->flag & AUTO_REQSENSE) {
1471 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
1472 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
1473 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1474 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1475 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
1476 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1477 } else {
1478 ptr = (u8 *)srb->cmd->cmnd;
1479 for (i = 0; i < srb->cmd->cmd_len; i++)
1480 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
1481 }
1482 no_cmd:
1483 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
1484 DO_HWRESELECT | DO_DATALATCH);
1485 if (DC395x_read16(acb, TRM_S1040_SCSI_STATUS) & SCSIINTERRUPT) {
1486
1487
1488
1489
1490
1491 dprintkdbg(DBG_0, "start_scsi: (0x%p) <%02i-%i> Failed - busy\n",
1492 srb->cmd, dcb->target_id, dcb->target_lun);
1493 srb->state = SRB_READY;
1494 free_tag(dcb, srb);
1495 srb->msg_count = 0;
1496 return_code = 1;
1497
1498 } else {
1499
1500
1501
1502
1503 srb->scsi_phase = PH_BUS_FREE;
1504 dcb->active_srb = srb;
1505 acb->active_dcb = dcb;
1506 return_code = 0;
1507
1508 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
1509 DO_DATALATCH | DO_HWRESELECT);
1510
1511 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, scsicommand);
1512 }
1513 return return_code;
1514}
1515
1516
/*
 * Raise ATN and flag the srb as having a message-out pending.
 * Wrapped in do { } while (0) so the macro is a single statement and is
 * safe inside unbraced if/else bodies (the old two-statement form was not).
 * Requires `acb` and `srb` in the caller's scope.
 */
#define DC395x_ENABLE_MSGOUT \
	do { \
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_SETATN); \
		srb->state |= SRB_MSGOUT; \
	} while (0)
1520
1521
1522
1523static inline void enable_msgout_abort(struct AdapterCtlBlk *acb,
1524 struct ScsiReqBlk *srb)
1525{
1526 srb->msgout_buf[0] = ABORT;
1527 srb->msg_count = 1;
1528 DC395x_ENABLE_MSGOUT;
1529 srb->state &= ~SRB_MSGIN;
1530 srb->state |= SRB_MSGOUT;
1531}
1532
1533
1534
1535
1536
1537
1538
1539
1540
/*
 * Core SCSI interrupt handling: read/acknowledge the interrupt status,
 * handle bus-level events (disconnect, reselection, reset, selection
 * timeout) directly, and for BusService/CmdDone events run the two-step
 * phase state machine: phase0 handler for the phase being left, then
 * phase1 handler for the new phase reported in scsi_status.
 * Runs under the host lock.
 */
static void dc395x_handle_interrupt(struct AdapterCtlBlk *acb,
		u16 scsi_status)
{
	struct DeviceCtlBlk *dcb;
	struct ScsiReqBlk *srb;
	u16 phase;
	u8 scsi_intstatus;
	unsigned long flags;
	/* per-phase handler, may rewrite scsi_status through the pointer */
	void (*dc395x_statev)(struct AdapterCtlBlk *, struct ScsiReqBlk *,
			      u16 *);

	DC395x_LOCK_IO(acb->scsi_host, flags);

	/* Read the interrupt status register */
	scsi_intstatus = DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	if ((scsi_status & 0x2007) == 0x2002)
		dprintkl(KERN_DEBUG,
			"COP after COP completed? %04x\n", scsi_status);
	if (debug_enabled(DBG_KG)) {
		if (scsi_intstatus & INT_SELTIMEOUT)
			dprintkdbg(DBG_KG, "handle_interrupt: Selection timeout\n");
	}

	/* The hardware answered the selection: cancel our backup timer */
	if (timer_pending(&acb->selto_timer))
		del_timer(&acb->selto_timer);

	if (scsi_intstatus & (INT_SELTIMEOUT | INT_DISCONNECT)) {
		disconnect(acb);	/* bus free */
		goto out_unlock;
	}
	if (scsi_intstatus & INT_RESELECTED) {
		reselect(acb);
		goto out_unlock;
	}
	if (scsi_intstatus & INT_SELECT) {
		/* we were selected as a target - not supported */
		dprintkl(KERN_INFO, "Host does not support target mode!\n");
		goto out_unlock;
	}
	if (scsi_intstatus & INT_SCSIRESET) {
		scsi_reset_detect(acb);
		goto out_unlock;
	}
	if (scsi_intstatus & (INT_BUSSERVICE | INT_CMDDONE)) {
		dcb = acb->active_dcb;
		if (!dcb) {
			dprintkl(KERN_DEBUG,
				"Oops: BusService (%04x %02x) w/o ActiveDCB!\n",
				scsi_status, scsi_intstatus);
			goto out_unlock;
		}
		srb = dcb->active_srb;
		if (dcb->flag & ABORT_DEV_) {
			dprintkdbg(DBG_0, "MsgOut Abort Device.....\n");
			enable_msgout_abort(acb, srb);
		}

		/* phase we are LEAVING (stored on the previous interrupt) */
		phase = (u16)srb->scsi_phase;

		/*
		 * Step 1: run the "phase0" handler for the old phase.  It
		 * accounts for what was transferred and may rewrite
		 * scsi_status (e.g. forcing PH_BUS_FREE).
		 */
		dc395x_statev = dc395x_scsi_phase0[phase];
		dc395x_statev(acb, srb, &scsi_status);

		/*
		 * The low bits of (possibly rewritten) scsi_status now carry
		 * the NEW bus phase; remember it for the next interrupt.
		 */
		srb->scsi_phase = scsi_status & PHASEMASK;
		phase = (u16)scsi_status & PHASEMASK;

		/* Step 2: program the chip for the new phase. */
		dc395x_statev = dc395x_scsi_phase1[phase];
		dc395x_statev(acb, srb, &scsi_status);
	}
      out_unlock:
	DC395x_UNLOCK_IO(acb->scsi_host, flags);
}
1643
1644
/*
 * Top-level interrupt handler (shared IRQ capable: returns IRQ_NONE when
 * neither the SCSI core nor the DMA engine flagged anything for us).
 * SCSI interrupts are delegated to dc395x_handle_interrupt(); DMA-engine
 * errors are currently only logged, not handled (see #if 0 block).
 */
static irqreturn_t dc395x_interrupt(int irq, void *dev_id)
{
	struct AdapterCtlBlk *acb = dev_id;
	u16 scsi_status;
	u8 dma_status;
	irqreturn_t handled = IRQ_NONE;

	/* Read both status registers to identify the interrupt source */
	scsi_status = DC395x_read16(acb, TRM_S1040_SCSI_STATUS);
	dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
	if (scsi_status & SCSIINTERRUPT) {
		/* SCSI core raised the interrupt: run the state machine */
		dc395x_handle_interrupt(acb, scsi_status);
		handled = IRQ_HANDLED;
	}
	else if (dma_status & 0x20) {
		/* DMA engine error interrupt */
		dprintkl(KERN_INFO, "Interrupt from DMA engine: 0x%02x!\n", dma_status);
#if 0
		/* disabled recovery path: abort the active request */
		dprintkl(KERN_INFO, "This means DMA error! Try to handle ...\n");
		if (acb->active_dcb) {
			acb->active_dcb-> flag |= ABORT_DEV_;
			if (acb->active_dcb->active_srb)
				enable_msgout_abort(acb, acb->active_dcb->active_srb);
		}
		DC395x_write8(acb, TRM_S1040_DMA_CONTROL, ABORTXFER | CLRXFIFO);
#else
		dprintkl(KERN_INFO, "Ignoring DMA error (probably a bad thing) ...\n");
		/* NOTE(review): dead store - acb is not used after this */
		acb = NULL;
#endif
		handled = IRQ_HANDLED;
	}

	return handled;
}
1682
1683
1684static void msgout_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1685 u16 *pscsi_status)
1686{
1687 dprintkdbg(DBG_0, "msgout_phase0: (0x%p)\n", srb->cmd);
1688 if (srb->state & (SRB_UNEXPECT_RESEL + SRB_ABORT_SENT))
1689 *pscsi_status = PH_BUS_FREE;
1690
1691 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1692 srb->state &= ~SRB_MSGOUT;
1693}
1694
1695
/*
 * Entering MESSAGE OUT phase: push the queued message bytes into the
 * SCSI FIFO (or a NOP if nothing is queued) and start a FIFO_OUT chip
 * command to put them on the bus.
 */
static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	u16 i;
	u8 *ptr;
	dprintkdbg(DBG_0, "msgout_phase1: (0x%p)\n", srb->cmd);

	clear_fifo(acb, "msgout_phase1");
	if (!(srb->state & SRB_MSGOUT)) {
		/* target switched to MSG OUT without us requesting it */
		srb->state |= SRB_MSGOUT;
		dprintkl(KERN_DEBUG,
			"msgout_phase1: (0x%p) Phase unexpected\n",
			srb->cmd);
	}
	if (!srb->msg_count) {
		/* nothing queued: answer with a NOP message */
		dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n",
			srb->cmd);
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, NOP);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
		/* put the FIFO byte(s) on the bus */
		DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
		return;
	}
	ptr = (u8 *)srb->msgout_buf;
	for (i = 0; i < srb->msg_count; i++)
		DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
	srb->msg_count = 0;
	/* remember that we actively sent an abort */
	if (srb->msgout_buf[0] == ABORT_TASK_SET)
		srb->state = SRB_ABORT_SENT;

	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
}
1728
1729
/*
 * Leaving COMMAND phase: nothing to account for; just write DO_DATALATCH
 * (same ack/latch step the other phase0 handlers end with).
 */
static void command_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "command_phase0: (0x%p)\n", srb->cmd);
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
}
1736
1737
1738static void command_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
1739 u16 *pscsi_status)
1740{
1741 struct DeviceCtlBlk *dcb;
1742 u8 *ptr;
1743 u16 i;
1744 dprintkdbg(DBG_0, "command_phase1: (0x%p)\n", srb->cmd);
1745
1746 clear_fifo(acb, "command_phase1");
1747 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_CLRATN);
1748 if (!(srb->flag & AUTO_REQSENSE)) {
1749 ptr = (u8 *)srb->cmd->cmnd;
1750 for (i = 0; i < srb->cmd->cmd_len; i++) {
1751 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr);
1752 ptr++;
1753 }
1754 } else {
1755 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, REQUEST_SENSE);
1756 dcb = acb->active_dcb;
1757
1758 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, (dcb->target_lun << 5));
1759 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1760 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1761 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SCSI_SENSE_BUFFERSIZE);
1762 DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
1763 }
1764 srb->state |= SRB_COMMAND;
1765
1766 DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
1767
1768 DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
1769}
1770
1771
1772
1773
1774
1775
1776static void sg_verify_length(struct ScsiReqBlk *srb)
1777{
1778 if (debug_enabled(DBG_SG)) {
1779 unsigned len = 0;
1780 unsigned idx = srb->sg_index;
1781 struct SGentry *psge = srb->segment_x + idx;
1782 for (; idx < srb->sg_count; psge++, idx++)
1783 len += psge->length;
1784 if (len != srb->total_xfer_length)
1785 dprintkdbg(DBG_SG,
1786 "Inconsistent SRB S/G lengths (Tot=%i, Count=%i) !!\n",
1787 srb->total_xfer_length, len);
1788 }
1789}
1790
1791
1792
1793
1794
1795
/*
 * Update the scatter/gather list after a (partial) transfer: `left` is
 * how many bytes still remain.  Fully-transferred segments are skipped
 * by advancing sg_index; a partially-transferred segment has its
 * address/length adjusted in place (with DMA sync around the write,
 * since the chip reads the table via sg_bus_addr).
 */
static void sg_update_list(struct ScsiReqBlk *srb, u32 left)
{
	u8 idx;
	u32 xferred = srb->total_xfer_length - left;	/* bytes done */
	struct SGentry *psge = srb->segment_x + srb->sg_index;

	dprintkdbg(DBG_0,
		"sg_update_list: Transferred %i of %i bytes, %i remain\n",
		xferred, srb->total_xfer_length, left);
	if (xferred == 0) {
		/* nothing transferred, nothing to update */
		return;
	}

	sg_verify_length(srb);
	srb->total_xfer_length = left;	/* new remaining length */
	for (idx = srb->sg_index; idx < srb->sg_count; idx++) {
		if (xferred >= psge->length) {
			/* segment completely transferred: skip it */
			xferred -= psge->length;
		} else {
			/* partial segment: shrink it in place */
			dma_sync_single_for_cpu(&srb->dcb->acb->dev->dev,
					srb->sg_bus_addr, SEGMENTX_LEN,
					DMA_TO_DEVICE);
			psge->length -= xferred;
			psge->address += xferred;
			srb->sg_index = idx;
			dma_sync_single_for_device(&srb->dcb->acb->dev->dev,
					srb->sg_bus_addr, SEGMENTX_LEN,
					DMA_TO_DEVICE);
			break;
		}
		psge++;
	}
	sg_verify_length(srb);
}
1833
1834
1835
1836
1837
1838
1839
1840
1841static void sg_subtract_one(struct ScsiReqBlk *srb)
1842{
1843 sg_update_list(srb, srb->total_xfer_length - 1);
1844}
1845
1846
1847
1848
1849
1850
1851
1852
1853
1854
/*
 * After a data transfer phase ends, drain whatever is left in the SCSI
 * and DMA FIFOs.  The clear order depends on the transfer direction
 * (bit 0 of DMA_COMMAND): SCSI FIFO first for reads, DMA FIFO first for
 * writes.  Ends with the usual DO_DATALATCH ack.
 */
static void cleanup_after_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	/* bit 0 of DMA_COMMAND set => read (data in) direction */
	if (DC395x_read16(acb, TRM_S1040_DMA_COMMAND) & 0x0001) {
		/* SCSI FIFO not empty (bit 0x40 = empty flag clear)? */
		if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
			clear_fifo(acb, "cleanup/in");
		/* DMA FIFO not empty (bit 0x80 = empty flag clear)? */
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
	} else {
		/* write direction: clear DMA FIFO before SCSI FIFO */
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80))
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
		if (!(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) & 0x40))
			clear_fifo(acb, "cleanup/out");
	}
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
}
1872
1873
1874
1875
1876
1877
1878#define DC395x_LASTPIO 4
1879
1880
/*
 * Leaving DATA OUT phase: stop the DMA engine, work out how many bytes
 * never made it to the target (leftovers in the SCSI FIFO plus the
 * remaining transfer counter) and rewind the scatter/gather bookkeeping
 * accordingly, including a workaround for an apparent chip bug near
 * segment/page boundaries.
 */
static void data_out_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u16 scsi_status = *pscsi_status;
	u32 d_left_counter = 0;	/* bytes NOT transferred */
	dprintkdbg(DBG_0, "data_out_phase0: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);

	dprintkdbg(DBG_PIO, "data_out_phase0: "
		"DMA{fifocnt=0x%02x fifostat=0x%02x} "
		"SCSI{fifocnt=0x%02x cnt=0x%06x status=0x%04x} total=0x%06x\n",
		DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
		DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
		DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
		DC395x_read32(acb, TRM_S1040_SCSI_COUNTER), scsi_status,
		srb->total_xfer_length);
	/* stop DMA and discard anything left in its FIFO */
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, STOPDMAXFER | CLRXFIFO);

	if (!(srb->state & SRB_XFERPAD)) {
		if (scsi_status & PARITYERROR)
			srb->status |= PARITY_ERROR;

		/*
		 * Transfer counter not zero: bytes are still sitting in
		 * the SCSI FIFO and were never put on the bus.
		 */
		if (!(scsi_status & SCSIXFERDONE)) {
			/* FIFO count is in words when running wide */
			d_left_counter =
			    (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
				  0x1F);
			if (dcb->sync_period & WIDE_SYNC)
				d_left_counter <<= 1;

			dprintkdbg(DBG_KG, "data_out_phase0: FIFO contains %i %s\n"
				"SCSI{fifocnt=0x%02x cnt=0x%08x} "
				"DMA{fifocnt=0x%04x cnt=0x%02x ctr=0x%08x}\n",
				DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
				(dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
				DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT),
				DC395x_read32(acb, TRM_S1040_SCSI_COUNTER),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
				DC395x_read32(acb, TRM_S1040_DMA_CXCNT));
		}

		/*
		 * For DMA transfers also add what the SCSI transfer counter
		 * says is still outstanding (PIO transfers of up to
		 * DC395x_LASTPIO bytes don't use the counter this way).
		 */
		if (srb->total_xfer_length > DC395x_LASTPIO)
			d_left_counter +=
			    DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);

		/*
		 * Odd-length wide transfer: the last leftover "byte" is a
		 * pad byte, not real data - discard it.
		 */
		if (d_left_counter == 1 && dcb->sync_period & WIDE_SYNC
		    && scsi_bufflen(srb->cmd) % 2) {
			d_left_counter = 0;
			dprintkl(KERN_INFO,
				"data_out_phase0: Discard 1 byte (0x%02x)\n",
				scsi_status);
		}

		if (d_left_counter == 0) {
			/* everything made it out */
			srb->total_xfer_length = 0;
		} else {
			/* rewind the S/G bookkeeping to the leftover count */
			long oldxferred =
			    srb->total_xfer_length - d_left_counter;
			const int diff =
			    (dcb->sync_period & WIDE_SYNC) ? 2 : 1;
			sg_update_list(srb, d_left_counter);
			/*
			 * Chip-bug workaround: when exactly one transfer
			 * unit remains at a segment end or a page boundary,
			 * back off one more unit and redo the update.
			 */
			if ((srb->segment_x[srb->sg_index].length ==
			     diff && scsi_sg_count(srb->cmd))
			    || ((oldxferred & ~PAGE_MASK) ==
				(PAGE_SIZE - diff))
			    ) {
				dprintkl(KERN_INFO, "data_out_phase0: "
					"Work around chip bug (%i)?\n", diff);
				d_left_counter =
				    srb->total_xfer_length - diff;
				sg_update_list(srb, d_left_counter);
			}
		}
	}
	/* if the bus already moved on to another phase, drain the FIFOs */
	if ((*pscsi_status & PHASEMASK) != PH_DATA_OUT) {
		cleanup_after_transfer(acb, srb);
	}
}
2012
2013
/*
 * Entering DATA OUT phase: clear stale FIFO contents and (re)start the
 * outbound transfer via data_io_transfer().
 */
static void data_out_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "data_out_phase1: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
	clear_fifo(acb, "data_out_phase1");
	/* do prepare before transfer when data out */
	data_io_transfer(acb, srb, XFERDATAOUT);
}
2023
/*
 * Leaving DATA IN phase: figure out how much is still outstanding
 * (SCSI counter plus SCSI FIFO contents), drain the last few bytes by
 * PIO when the remainder is small (<= DC395x_LASTPIO), and update the
 * remaining-length / scatter-gather bookkeeping.
 */
static void data_in_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	u16 scsi_status = *pscsi_status;

	dprintkdbg(DBG_0, "data_in_phase0: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);

	if (!(srb->state & SRB_XFERPAD)) {
		u32 d_left_counter;
		unsigned int sc, fc;

		if (scsi_status & PARITYERROR) {
			dprintkl(KERN_INFO, "data_in_phase0: (0x%p) "
				"Parity Error\n", srb->cmd);
			srb->status |= PARITY_ERROR;
		}

		/* DMA FIFO not yet empty (bit 0x80 clear)? */
		if (!(DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT) & 0x80)) {
#if 0
			/* disabled busy-wait for the DMA FIFO to flush
			 * (note: dead code; the first dprintkl below even
			 * lacks a comma and would not compile) */
			int ctr = 6000000;
			dprintkl(KERN_DEBUG,
				"DIP0: Wait for DMA FIFO to flush ...\n");
			while (!
			       (DC395x_read16(acb, TRM_S1040_DMA_FIFOSTAT) &
				0x80) && --ctr);
			if (ctr < 6000000 - 1)
				dprintkl(KERN_DEBUG
					"DIP0: Had to wait for DMA ...\n");
			if (!ctr)
				dprintkl(KERN_ERR,
					"Deadlock in DIP0 waiting for DMA FIFO empty!!\n");
#endif
			dprintkdbg(DBG_KG, "data_in_phase0: "
				"DMA{fifocnt=0x%02x fifostat=0x%02x}\n",
				DC395x_read8(acb, TRM_S1040_DMA_FIFOCNT),
				DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT));
		}

		/* remaining = SCSI counter + SCSI FIFO (words when wide) */
		sc = DC395x_read32(acb, TRM_S1040_SCSI_COUNTER);
		fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);
		d_left_counter = sc + ((fc & 0x1f)
		       << ((srb->dcb->sync_period & WIDE_SYNC) ? 1 :
			   0));
		dprintkdbg(DBG_KG, "data_in_phase0: "
			"SCSI{fifocnt=0x%02x%s ctr=0x%08x} "
			"DMA{fifocnt=0x%02x fifostat=0x%02x ctr=0x%08x} "
			"Remain{totxfer=%i scsi_fifo+ctr=%i}\n",
			fc,
			(srb->dcb->sync_period & WIDE_SYNC) ? "words" : "bytes",
			sc,
			fc,
			DC395x_read8(acb, TRM_S1040_DMA_FIFOSTAT),
			DC395x_read32(acb, TRM_S1040_DMA_CXCNT),
			srb->total_xfer_length, d_left_counter);
#if DC395x_LASTPIO
		/* small remainder: drain the SCSI FIFO by PIO */
		if (d_left_counter
		    && srb->total_xfer_length <= DC395x_LASTPIO) {
			size_t left_io = srb->total_xfer_length;

			dprintkdbg(DBG_PIO, "data_in_phase0: PIO (%i %s) "
				   "for remaining %i bytes:",
				fc & 0x1f,
				(srb->dcb->sync_period & WIDE_SYNC) ?
				    "words" : "bytes",
				srb->total_xfer_length);
			if (srb->dcb->sync_period & WIDE_SYNC)
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
					      CFG2_WIDEFIFO);
			/* walk the S/G list via atomic kmap windows */
			while (left_io) {
				unsigned char *virt, *base = NULL;
				unsigned long flags = 0;
				size_t len = left_io;
				size_t offset = srb->request_length - left_io;

				local_irq_save(flags);
				/* map the current S/G chunk */
				base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
							   srb->sg_count, &offset, &len);
				virt = base + offset;

				left_io -= len;

				while (len) {
					u8 byte;
					byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
					*virt++ = byte;

					if (debug_enabled(DBG_PIO))
						printk(" %02x", byte);

					d_left_counter--;
					sg_subtract_one(srb);

					len--;

					fc = DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT);

					/* 0x40 = FIFO empty: stop early */
					if (fc == 0x40) {
						left_io = 0;
						break;
					}
				}

				WARN_ON((fc != 0x40) == !d_left_counter);

				if (fc == 0x40 && (srb->dcb->sync_period & WIDE_SYNC)) {
					/* wide FIFO may still hold an odd trailing byte */
					if (srb->total_xfer_length > 0) {
						u8 byte = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);

						*virt++ = byte;
						srb->total_xfer_length--;
						if (debug_enabled(DBG_PIO))
							printk(" %02x", byte);
					}

					DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
				}

				scsi_kunmap_atomic_sg(base);
				local_irq_restore(flags);
			}

			if (debug_enabled(DBG_PIO))
				printk("\n");
		}
#endif

#if 0
		/* disabled: recompute leftover from the SCSI FIFO only */
		if (!(scsi_status & SCSIXFERDONE)) {
			d_left_counter =
			    (u32)(DC395x_read8(acb, TRM_S1040_SCSI_FIFOCNT) &
				  0x1F);
			if (srb->dcb->sync_period & WIDE_SYNC)
				d_left_counter <<= 1;
		}
#endif

		if (d_left_counter == 0
		    || (scsi_status & SCSIXFERCNT_2_ZERO)) {
#if 0
			/* disabled busy-wait for DMA completion */
			int ctr = 6000000;
			u8 TempDMAstatus;
			do {
				TempDMAstatus =
				    DC395x_read8(acb, TRM_S1040_DMA_STATUS);
			} while (!(TempDMAstatus & DMAXFERCOMP) && --ctr);
			if (!ctr)
				dprintkl(KERN_ERR,
					"Deadlock in DataInPhase0 waiting for DMA!!\n");
			srb->total_xfer_length = 0;
#endif
			srb->total_xfer_length = d_left_counter;
		} else {
			/* partial transfer: fix up the S/G bookkeeping */
			sg_update_list(srb, d_left_counter);
		}
	}
	/* if the bus already moved on to another phase, drain the FIFOs */
	if ((*pscsi_status & PHASEMASK) != PH_DATA_IN) {
		cleanup_after_transfer(acb, srb);
	}
}
2235
2236
/*
 * Entering DATA IN phase: start the inbound transfer via
 * data_io_transfer() (no FIFO clear needed here, unlike data out).
 */
static void data_in_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "data_in_phase1: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
	data_io_transfer(acb, srb, XFERDATAIN);
}
2244
2245
/*
 * Set up and start a data transfer in the direction given by io_dir
 * (DMACMD_DIR set = read/data-in).  Three cases:
 *  - more than DC395x_LASTPIO bytes: program the DMA engine (S/G table
 *    or single segment) and issue a DMA chip command;
 *  - 1..DC395x_LASTPIO bytes: transfer by PIO through the SCSI FIFO;
 *  - 0 bytes but target still in a data phase: pad the transfer with
 *    dummy bytes (SRB_XFERPAD) to satisfy the target.
 */
static void data_io_transfer(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb, u16 io_dir)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u8 bval;
	dprintkdbg(DBG_0,
		"data_io_transfer: (0x%p) <%02i-%i> %c len=%i, sg=(%i/%i)\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun,
		((io_dir & DMACMD_DIR) ? 'r' : 'w'),
		srb->total_xfer_length, srb->sg_index, srb->sg_count);
	if (srb == acb->tmp_srb)
		dprintkl(KERN_ERR, "data_io_transfer: Using tmp_srb!\n");
	if (srb->sg_index >= srb->sg_count) {
		/* all segments already done - nothing to transfer */
		return;
	}

	if (srb->total_xfer_length > DC395x_LASTPIO) {
		u8 dma_status = DC395x_read8(acb, TRM_S1040_DMA_STATUS);
		/*
		 * A previous DMA transfer should not still be pending at
		 * this point; log and clear the FIFO if it is.
		 */
		if (dma_status & XFERPENDING) {
			dprintkl(KERN_DEBUG, "data_io_transfer: Xfer pending! "
				"Expect trouble!\n");
			dump_register_info(acb, dcb, srb);
			DC395x_write8(acb, TRM_S1040_DMA_CONTROL, CLRXFIFO);
		}

		srb->state |= SRB_DATA_XFER;
		DC395x_write32(acb, TRM_S1040_DMA_XHIGHADDR, 0);
		if (scsi_sg_count(srb->cmd)) {
			/* scatter/gather: point the engine at the remaining
			 * part of the S/G table */
			io_dir |= DMACMD_SG;
			DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
				       srb->sg_bus_addr +
				       sizeof(struct SGentry) *
				       srb->sg_index);
			/* byte count of the remaining table entries (8 bytes each) */
			DC395x_write32(acb, TRM_S1040_DMA_XCNT,
				       ((u32)(srb->sg_count -
					      srb->sg_index) << 3));
		} else {
			/* single contiguous segment */
			io_dir &= ~DMACMD_SG;
			DC395x_write32(acb, TRM_S1040_DMA_XLOWADDR,
				       srb->segment_x[0].address);
			DC395x_write32(acb, TRM_S1040_DMA_XCNT,
				       srb->segment_x[0].length);
		}
		/* program the SCSI transfer counter */
		DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
			       srb->total_xfer_length);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
		/* start order differs by direction: SCSI first on reads,
		 * DMA first on writes */
		if (io_dir & DMACMD_DIR) {
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				      SCMD_DMA_IN);
			DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
		} else {
			DC395x_write16(acb, TRM_S1040_DMA_COMMAND, io_dir);
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				      SCMD_DMA_OUT);
		}

	}
#if DC395x_LASTPIO
	else if (srb->total_xfer_length > 0) {
		/* short transfer: do it by PIO through the SCSI FIFO */
		srb->state |= SRB_DATA_XFER;

		DC395x_write32(acb, TRM_S1040_SCSI_COUNTER,
			       srb->total_xfer_length);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
		if (io_dir & DMACMD_DIR) {
			/* reads: the bytes are fetched in data_in_phase0 */
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
				      SCMD_FIFO_IN);
		} else {
			/* writes: push the bytes into the FIFO now */
			int ln = srb->total_xfer_length;
			size_t left_io = srb->total_xfer_length;

			if (srb->dcb->sync_period & WIDE_SYNC)
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
				     CFG2_WIDEFIFO);

			/* walk the S/G list via atomic kmap windows */
			while (left_io) {
				unsigned char *virt, *base = NULL;
				unsigned long flags = 0;
				size_t len = left_io;
				size_t offset = srb->request_length - left_io;

				local_irq_save(flags);
				base = scsi_kmap_atomic_sg(scsi_sglist(srb->cmd),
							   srb->sg_count, &offset, &len);
				virt = base + offset;

				left_io -= len;

				while (len--) {
					if (debug_enabled(DBG_PIO))
						printk(" %02x", *virt);

					DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *virt++);

					sg_subtract_one(srb);
				}

				scsi_kunmap_atomic_sg(base);
				local_irq_restore(flags);
			}
			if (srb->dcb->sync_period & WIDE_SYNC) {
				/* odd length: pad the wide FIFO with a zero byte */
				if (ln % 2) {
					DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 0);
					if (debug_enabled(DBG_PIO))
						printk(" |00");
				}
				DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
			}

			if (debug_enabled(DBG_PIO))
				printk("\n");
			DC395x_write8(acb, TRM_S1040_SCSI_COMMAND,
					  SCMD_FIFO_OUT);
		}
	}
#endif
	else {		/* nothing left to transfer: pad */
		if (srb->sg_count) {
			srb->adapter_status = H_OVER_UNDER_RUN;
			srb->status |= OVER_RUN;
		}
		/* transfer 2 pad bytes when wide, 1 otherwise */
		if (dcb->sync_period & WIDE_SYNC) {
			DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 2);
			DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2,
				      CFG2_WIDEFIFO);
			if (io_dir & DMACMD_DIR) {
				/* discard two incoming bytes */
				DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
				DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
			} else {
				/* write two dummy bytes */
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'G');
			}
			DC395x_write8(acb, TRM_S1040_SCSI_CONFIG2, 0);
		} else {
			DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
			if (io_dir & DMACMD_DIR)
				DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
			else
				DC395x_write8(acb, TRM_S1040_SCSI_FIFO, 'K');
		}
		srb->state |= SRB_XFERPAD;
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
		/* kick off the pad transfer */
		bval = (io_dir & DMACMD_DIR) ? SCMD_FIFO_IN : SCMD_FIFO_OUT;
		DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, bval);
	}
}
2419
2420
/*
 * Leaving STATUS phase: read the target's status byte and the following
 * message byte from the SCSI FIFO, mark the request complete and force
 * the state machine to bus-free handling; accept the message.
 */
static void status_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "status_phase0: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
	srb->target_status = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
	/* the message byte that followed (normally COMMAND COMPLETE) */
	srb->end_message = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
	srb->state = SRB_COMPLETED;
	*pscsi_status = PH_BUS_FREE;	/* .. initial phase */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
2433
2434
/*
 * Entering STATUS phase: issue SCMD_COMP so the chip collects the status
 * and command-complete message bytes (read back in status_phase0).
 */
static void status_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "status_phase1: (0x%p) <%02i-%i>\n",
		srb->cmd, srb->cmd->device->id, (u8)srb->cmd->device->lun);
	srb->state = SRB_STATUS;
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_COMP);
}
2444
2445
2446
2447static inline u8 msgin_completed(u8 * msgbuf, u32 len)
2448{
2449 if (*msgbuf == EXTENDED_MESSAGE) {
2450 if (len < 2)
2451 return 0;
2452 if (len < msgbuf[1] + 2)
2453 return 0;
2454 } else if (*msgbuf >= 0x20 && *msgbuf <= 0x2f)
2455 if (len < 2)
2456 return 0;
2457 return 1;
2458}
2459
2460
/*
 * Answer the current message-in with MESSAGE REJECT: queue the reject
 * byte, raise ATN and flip the srb from message-in to message-out state.
 */
static inline void msgin_reject(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	srb->msgout_buf[0] = MESSAGE_REJECT;
	srb->msg_count = 1;
	DC395x_ENABLE_MSGOUT;
	srb->state &= ~SRB_MSGIN;
	srb->state |= SRB_MSGOUT;
	dprintkl(KERN_INFO, "msgin_reject: 0x%02x <%02i-%i>\n",
		srb->msgin_buf[0],
		srb->dcb->target_id, srb->dcb->target_lun);
}
2473
2474
2475static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
2476 struct DeviceCtlBlk *dcb, u8 tag)
2477{
2478 struct ScsiReqBlk *srb = NULL;
2479 struct ScsiReqBlk *i;
2480 dprintkdbg(DBG_0, "msgin_qtag: (0x%p) tag=%i srb=%p\n",
2481 srb->cmd, tag, srb);
2482
2483 if (!(dcb->tag_mask & (1 << tag)))
2484 dprintkl(KERN_DEBUG,
2485 "msgin_qtag: tag_mask=0x%08x does not reserve tag %i!\n",
2486 dcb->tag_mask, tag);
2487
2488 if (list_empty(&dcb->srb_going_list))
2489 goto mingx0;
2490 list_for_each_entry(i, &dcb->srb_going_list, list) {
2491 if (i->tag_number == tag) {
2492 srb = i;
2493 break;
2494 }
2495 }
2496 if (!srb)
2497 goto mingx0;
2498
2499 dprintkdbg(DBG_0, "msgin_qtag: (0x%p) <%02i-%i>\n",
2500 srb->cmd, srb->dcb->target_id, srb->dcb->target_lun);
2501 if (dcb->flag & ABORT_DEV_) {
2502
2503 enable_msgout_abort(acb, srb);
2504 }
2505
2506 if (!(srb->state & SRB_DISCONNECT))
2507 goto mingx0;
2508
2509 memcpy(srb->msgin_buf, dcb->active_srb->msgin_buf, acb->msg_len);
2510 srb->state |= dcb->active_srb->state;
2511 srb->state |= SRB_DATA_XFER;
2512 dcb->active_srb = srb;
2513
2514 return srb;
2515
2516 mingx0:
2517 srb = acb->tmp_srb;
2518 srb->state = SRB_UNEXPECT_RESEL;
2519 dcb->active_srb = srb;
2520 srb->msgout_buf[0] = ABORT_TASK;
2521 srb->msg_count = 1;
2522 DC395x_ENABLE_MSGOUT;
2523 dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag);
2524 return srb;
2525}
2526
2527
/*
 * Re-load the chip's per-target registers (target id, sync period and
 * offset) from the dcb and recompute the transfer rate - used after a
 * sync/wide negotiation changes the device parameters.
 */
static inline void reprogram_regs(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb)
{
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
	set_xfer_rate(acb, dcb);
}
2536
2537
2538
/*
 * Target rejected our SDTR: fall back to asynchronous transfers for this
 * device (offset 0), mark sync negotiation done, and reprogram the chip.
 * If wide negotiation is still outstanding, start a WDTR next.
 */
static void msgin_set_async(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	dprintkl(KERN_DEBUG, "msgin_set_async: No sync transfers <%02i-%i>\n",
		dcb->target_id, dcb->target_lun);

	dcb->sync_mode &= ~(SYNC_NEGO_ENABLE);
	dcb->sync_mode |= SYNC_NEGO_DONE;
	/* async: zero offset, 200ns-class period (stored as period >> 2) */
	dcb->sync_offset = 0;
	dcb->min_nego_period = 200 >> 2;
	srb->state &= ~SRB_DO_SYNC_NEGO;
	reprogram_regs(acb, dcb);
	if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
	    && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
		build_wdtr(acb, dcb, srb);
		DC395x_ENABLE_MSGOUT;
		dprintkdbg(DBG_0, "msgin_set_async(rej): Try WDTR anyway\n");
	}
}
2559
2560
2561
/*
 * Handle an incoming SDTR message (msgin_buf[3] = period in 4ns units,
 * msgin_buf[4] = offset): clamp the offset, round the period up to the
 * nearest supported clock divisor, program the chip, and either answer
 * with our own SDTR (target-initiated nego) or consider the negotiation
 * we started as done (then possibly start WDTR).
 */
static void msgin_set_sync(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
{
	struct DeviceCtlBlk *dcb = srb->dcb;
	u8 bval;
	int fact;
	/* NOTE(review): divides by msgin_buf[3]; a (protocol-violating)
	 * zero period from the target would fault here - confirm callers
	 * validate the message first. */
	dprintkdbg(DBG_1, "msgin_set_sync: <%02i> Sync: %ins "
		"(%02i.%01i MHz) Offset %i\n",
		dcb->target_id, srb->msgin_buf[3] << 2,
		(250 / srb->msgin_buf[3]),
		((250 % srb->msgin_buf[3]) * 10) / srb->msgin_buf[3],
		srb->msgin_buf[4]);

	if (srb->msgin_buf[4] > 15)
		srb->msgin_buf[4] = 15;
	/* negotiate the offset down to what both sides accept */
	if (!(dcb->dev_mode & NTC_DO_SYNC_NEGO))
		dcb->sync_offset = 0;
	else if (dcb->sync_offset == 0)
		dcb->sync_offset = srb->msgin_buf[4];
	if (srb->msgin_buf[4] > dcb->sync_offset)
		srb->msgin_buf[4] = dcb->sync_offset;
	else
		dcb->sync_offset = srb->msgin_buf[4];
	/* find the smallest clock divisor that satisfies both the
	 * target's period and our own minimum */
	bval = 0;
	while (bval < 7 && (srb->msgin_buf[3] > clock_period[bval]
			    || dcb->min_nego_period >
			    clock_period[bval]))
		bval++;
	if (srb->msgin_buf[3] < clock_period[bval])
		dprintkl(KERN_INFO,
			"msgin_set_sync: Increase sync nego period to %ins\n",
			clock_period[bval] << 2);
	srb->msgin_buf[3] = clock_period[bval];
	dcb->sync_period &= 0xf0;
	dcb->sync_period |= ALT_SYNC | bval;
	dcb->min_nego_period = srb->msgin_buf[3];

	/* MB/s math: 500 for wide (2 bytes/transfer), 250 for narrow */
	if (dcb->sync_period & WIDE_SYNC)
		fact = 500;
	else
		fact = 250;

	dprintkl(KERN_INFO,
		"Target %02i: %s Sync: %ins Offset %i (%02i.%01i MB/s)\n",
		dcb->target_id, (fact == 500) ? "Wide16" : "",
		dcb->min_nego_period << 2, dcb->sync_offset,
		(fact / dcb->min_nego_period),
		((fact % dcb->min_nego_period) * 10 +
		dcb->min_nego_period / 2) / dcb->min_nego_period);

	if (!(srb->state & SRB_DO_SYNC_NEGO)) {
		/* target-initiated SDTR: echo back the agreed values */
		dprintkl(KERN_DEBUG, "msgin_set_sync: answer w/%ins %i\n",
			srb->msgin_buf[3] << 2, srb->msgin_buf[4]);

		memcpy(srb->msgout_buf, srb->msgin_buf, 5);
		srb->msg_count = 5;
		DC395x_ENABLE_MSGOUT;
		dcb->sync_mode |= SYNC_NEGO_DONE;
	} else {
		/* this was the answer to our SDTR; maybe start WDTR next */
		if ((dcb->sync_mode & WIDE_NEGO_ENABLE)
		    && !(dcb->sync_mode & WIDE_NEGO_DONE)) {
			build_wdtr(acb, dcb, srb);
			DC395x_ENABLE_MSGOUT;
			dprintkdbg(DBG_0, "msgin_set_sync: Also try WDTR\n");
		}
	}
	srb->state &= ~SRB_DO_SYNC_NEGO;
	dcb->sync_mode |= SYNC_NEGO_DONE | SYNC_NEGO_ENABLE;

	reprogram_regs(acb, dcb);
}
2633
2634
2635static inline void msgin_set_nowide(struct AdapterCtlBlk *acb,
2636 struct ScsiReqBlk *srb)
2637{
2638 struct DeviceCtlBlk *dcb = srb->dcb;
2639 dprintkdbg(DBG_1, "msgin_set_nowide: <%02i>\n", dcb->target_id);
2640
2641 dcb->sync_period &= ~WIDE_SYNC;
2642 dcb->sync_mode &= ~(WIDE_NEGO_ENABLE);
2643 dcb->sync_mode |= WIDE_NEGO_DONE;
2644 srb->state &= ~SRB_DO_WIDE_NEGO;
2645 reprogram_regs(acb, dcb);
2646 if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
2647 && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
2648 build_sdtr(acb, dcb, srb);
2649 DC395x_ENABLE_MSGOUT;
2650 dprintkdbg(DBG_0, "msgin_set_nowide: Rejected. Try SDTR anyway\n");
2651 }
2652}
2653
2654static void msgin_set_wide(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
2655{
2656 struct DeviceCtlBlk *dcb = srb->dcb;
2657 u8 wide = (dcb->dev_mode & NTC_DO_WIDE_NEGO
2658 && acb->config & HCC_WIDE_CARD) ? 1 : 0;
2659 dprintkdbg(DBG_1, "msgin_set_wide: <%02i>\n", dcb->target_id);
2660
2661 if (srb->msgin_buf[3] > wide)
2662 srb->msgin_buf[3] = wide;
2663
2664 if (!(srb->state & SRB_DO_WIDE_NEGO)) {
2665 dprintkl(KERN_DEBUG,
2666 "msgin_set_wide: Wide nego initiated <%02i>\n",
2667 dcb->target_id);
2668 memcpy(srb->msgout_buf, srb->msgin_buf, 4);
2669 srb->msg_count = 4;
2670 srb->state |= SRB_DO_WIDE_NEGO;
2671 DC395x_ENABLE_MSGOUT;
2672 }
2673
2674 dcb->sync_mode |= (WIDE_NEGO_ENABLE | WIDE_NEGO_DONE);
2675 if (srb->msgin_buf[3] > 0)
2676 dcb->sync_period |= WIDE_SYNC;
2677 else
2678 dcb->sync_period &= ~WIDE_SYNC;
2679 srb->state &= ~SRB_DO_WIDE_NEGO;
2680
2681 dprintkdbg(DBG_1,
2682 "msgin_set_wide: Wide (%i bit) negotiated <%02i>\n",
2683 (8 << srb->msgin_buf[3]), dcb->target_id);
2684 reprogram_regs(acb, dcb);
2685 if ((dcb->sync_mode & SYNC_NEGO_ENABLE)
2686 && !(dcb->sync_mode & SYNC_NEGO_DONE)) {
2687 build_sdtr(acb, dcb, srb);
2688 DC395x_ENABLE_MSGOUT;
2689 dprintkdbg(DBG_0, "msgin_set_wide: Also try SDTR.\n");
2690 }
2691}
2692
2693
2694
2695
2696
2697
2698
2699
2700
2701
2702
2703
2704
2705
/*
 * Message-in phase, part 0: read the next message byte from the SCSI
 * FIFO into msgin_buf and, once msgin_completed() says the message is
 * complete, act on it (disconnect, tag selection, reject handling,
 * SDTR/WDTR, abort, ...).  Always ends by latching data and telling
 * the chip to accept the message.
 */
static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	dprintkdbg(DBG_0, "msgin_phase0: (0x%p)\n", srb->cmd);

	srb->msgin_buf[acb->msg_len++] = DC395x_read8(acb, TRM_S1040_SCSI_FIFO);
	if (msgin_completed(srb->msgin_buf, acb->msg_len)) {
		/* Now evaluate the complete message */
		switch (srb->msgin_buf[0]) {
		case DISCONNECT:
			srb->state = SRB_DISCONNECT;
			break;

		case SIMPLE_QUEUE_TAG:
		case HEAD_OF_QUEUE_TAG:
		case ORDERED_QUEUE_TAG:
			/* The tag message selects the real SRB for this
			 * nexus; msgin_qtag looks it up by tag number. */
			srb =
			    msgin_qtag(acb, dcb,
				       srb->msgin_buf[1]);
			break;

		case MESSAGE_REJECT:
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL,
				       DO_CLRATN | DO_DATALATCH);
			/* A rejected SDTR falls back to async transfers */
			if (srb->state & SRB_DO_SYNC_NEGO) {
				msgin_set_async(acb, srb);
				break;
			}
			/* A rejected WDTR falls back to narrow transfers */
			if (srb->state & SRB_DO_WIDE_NEGO) {
				msgin_set_nowide(acb, srb);
				break;
			}
			/* Anything else rejected: abort the command */
			enable_msgout_abort(acb, srb);
			break;

		case EXTENDED_MESSAGE:
			/* SDTR: extended msg, length 3 */
			if (srb->msgin_buf[1] == 3
			    && srb->msgin_buf[2] == EXTENDED_SDTR) {
				msgin_set_sync(acb, srb);
				break;
			}
			/* WDTR: extended msg, length 2, width <= 2 */
			if (srb->msgin_buf[1] == 2
			    && srb->msgin_buf[2] == EXTENDED_WDTR
			    && srb->msgin_buf[3] <= 2) {
				msgin_set_wide(acb, srb);
				break;
			}
			msgin_reject(acb, srb);
			break;

		case IGNORE_WIDE_RESIDUE:
			/* not handled; just logged */
			dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n");
			break;

		case COMMAND_COMPLETE:
			/* nothing to do here */
			break;

		case SAVE_POINTERS:
			/* Pointer save/restore is not implemented; the
			 * remaining transfer length is only logged. */
			dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
				"SAVE POINTER rem=%i Ignore\n",
				srb->cmd, srb->total_xfer_length);
			break;

		case RESTORE_POINTERS:
			dprintkdbg(DBG_0, "msgin_phase0: RESTORE POINTER. Ignore\n");
			break;

		case ABORT:
			dprintkdbg(DBG_0, "msgin_phase0: (0x%p) "
				"<%02i-%i> ABORT msg\n",
				srb->cmd, dcb->target_id,
				dcb->target_lun);
			dcb->flag |= ABORT_DEV_;
			enable_msgout_abort(acb, srb);
			break;

		default:
			/* Answer an IDENTIFY with our own identify msg.
			 * NOTE(review): there is deliberately no break
			 * here, so control continues to msgin_reject()
			 * below even for the IDENTIFY case. */
			if (srb->msgin_buf[0] & IDENTIFY_BASE) {
				dprintkdbg(DBG_0, "msgin_phase0: Identify msg\n");
				srb->msg_count = 1;
				srb->msgout_buf[0] = dcb->identify_msg;
				DC395x_ENABLE_MSGOUT;
				srb->state |= SRB_MSGOUT;
			}
			msgin_reject(acb, srb);
		}

		/* Message is done: clear counter and MsgIn state */
		srb->state &= ~SRB_MSGIN;
		acb->msg_len = 0;
	}
	*pscsi_status = PH_BUS_FREE;
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
2815
2816
/*
 * Message-in phase, part 1: prepare the chip to receive one message
 * byte (clear FIFO, byte counter = 1) and mark the SRB as being in
 * message-in state; msgin_phase0 then consumes the byte.
 */
static void msgin_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
	dprintkdbg(DBG_0, "msgin_phase1: (0x%p)\n", srb->cmd);
	clear_fifo(acb, "msgin_phase1");
	DC395x_write32(acb, TRM_S1040_SCSI_COUNTER, 1);
	if (!(srb->state & SRB_MSGIN)) {
		/* a message after a disconnect means we reconnected */
		srb->state &= ~SRB_DISCONNECT;
		srb->state |= SRB_MSGIN;
	}
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
	/* SCSI command: read the message byte into the FIFO */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_IN);
}
2831
2832
/* Phase handler stub: this SCSI phase needs no part-0 processing. */
static void nop0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
}
2837
2838
/* Phase handler stub: this SCSI phase needs no part-1 processing. */
static void nop1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
		u16 *pscsi_status)
{
}
2843
2844
/*
 * Propagate freshly negotiated transfer parameters to every DCB that
 * shares this target id (i.e. the other LUNs of the same device).
 * During a device scan only the global current_sync_offset is saved.
 */
static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb)
{
	struct DeviceCtlBlk *i;

	/* Skip unless the identify message's LUN bits are zero
	 * (see IDENTIFY() use in device_alloc). */
	if (dcb->identify_msg & 0x07)
		return;

	if (acb->scan_devices) {
		current_sync_offset = dcb->sync_offset;
		return;
	}

	/* Copy the negotiated parameters to all DCBs of this target */
	list_for_each_entry(i, &acb->dcb_list, list)
		if (i->target_id == dcb->target_id) {
			i->sync_period = dcb->sync_period;
			i->sync_offset = dcb->sync_offset;
			i->sync_mode = dcb->sync_mode;
			i->min_nego_period = dcb->min_nego_period;
		}
}
2866
2867
/*
 * Handle a bus-free (disconnect) interrupt.  Depending on the SRB
 * state this is: an unexpected reselection fall-out, an aborted
 * command, a selection timeout (retried up to DC395x_MAX_RETRIES),
 * a normal disconnect, or completion of a finished command.
 */
static void disconnect(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	struct ScsiReqBlk *srb;

	if (!dcb) {
		/* Bus free with no active device: recover the chip and
		 * re-arm hardware reselection.  Queue is suspended by
		 * pushing last_reset into the future. */
		dprintkl(KERN_ERR, "disconnect: No such device\n");
		udelay(500);
		acb->last_reset =
		    jiffies + HZ / 2 +
		    HZ * acb->eeprom.delay_time;
		clear_fifo(acb, "disconnectEx");
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
		return;
	}
	srb = dcb->active_srb;
	acb->active_dcb = NULL;
	dprintkdbg(DBG_0, "disconnect: (0x%p)\n", srb->cmd);

	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */
	clear_fifo(acb, "disconnect");
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_HWRESELECT);
	if (srb->state & SRB_UNEXPECT_RESEL) {
		/* Fallout of an unexpected reselection (see reselect()) */
		dprintkl(KERN_ERR,
			"disconnect: Unexpected reselection <%02i-%i>\n",
			dcb->target_id, dcb->target_lun);
		srb->state = 0;
		waiting_process_next(acb);
	} else if (srb->state & SRB_ABORT_SENT) {
		/* ABORT message went out: fail everything on this device */
		dcb->flag &= ~ABORT_DEV_;
		acb->last_reset = jiffies + HZ / 2 + 1;
		dprintkl(KERN_ERR, "disconnect: SRB_ABORT_SENT\n");
		doing_srb_done(acb, DID_ABORT, srb->cmd, 1);
		waiting_process_next(acb);
	} else {
		if ((srb->state & (SRB_START_ + SRB_MSGOUT))
		    || !(srb->
			 state & (SRB_DISCONNECT | SRB_COMPLETED))) {
			/* Neither a clean disconnect nor a completed
			 * command: treat as selection timeout. */
			if (srb->state != SRB_START_
			    && srb->state != SRB_MSGOUT) {
				srb->state = SRB_READY;
				dprintkl(KERN_DEBUG,
					"disconnect: (0x%p) Unexpected\n",
					srb->cmd);
				srb->target_status = SCSI_STAT_SEL_TIMEOUT;
				goto disc1;
			} else {
				/* Selection timed out during SELECT or
				 * MSGOUT: retry unless out of retries or
				 * in the middle of a device scan. */
				dprintkdbg(DBG_KG, "disconnect: (0x%p) "
					"<%02i-%i> SelTO\n", srb->cmd,
					dcb->target_id, dcb->target_lun);
				if (srb->retry_count++ > DC395x_MAX_RETRIES
				    || acb->scan_devices) {
					srb->target_status =
					    SCSI_STAT_SEL_TIMEOUT;
					goto disc1;
				}
				free_tag(dcb, srb);
				list_move(&srb->list, &dcb->srb_waiting_list);
				dprintkdbg(DBG_KG,
					"disconnect: (0x%p) Retry\n",
					srb->cmd);
				waiting_set_timer(acb, HZ / 20);
			}
		} else if (srb->state & SRB_DISCONNECT) {
			u8 bval = DC395x_read8(acb, TRM_S1040_SCSI_SIGNAL);
			/* Normal disconnect.  If ACK (0x40) is still
			 * asserted another initiator may be active, so
			 * don't start the next command yet. */
			if (bval & 0x40) {
				dprintkdbg(DBG_0, "disconnect: SCSI bus stat "
					" 0x%02x: ACK set! Other controllers?\n",
					bval);
				/* wait for the next interrupt instead */
			} else
				waiting_process_next(acb);
		} else if (srb->state & SRB_COMPLETED) {
		      disc1:
			/* Command finished: release tag and SRB, then
			 * hand the result to the midlayer. */
			free_tag(dcb, srb);
			dcb->active_srb = NULL;
			srb->state = SRB_FREE;
			srb_done(acb, dcb, srb);
		}
	}
}
2962
2963
/*
 * Handle reselection by a target.  If we lost arbitration while trying
 * to start a command, that command is requeued first; then the
 * reselecting target/LUN is decoded from the chip and its disconnected
 * SRB (or the temporary SRB for tagged devices, whose real SRB is
 * chosen later by the tag message) is made active again.
 */
static void reselect(struct AdapterCtlBlk *acb)
{
	struct DeviceCtlBlk *dcb = acb->active_dcb;
	struct ScsiReqBlk *srb = NULL;
	u16 rsel_tar_lun_id;
	u8 id, lun;
	dprintkdbg(DBG_0, "reselect: acb=%p\n", acb);

	clear_fifo(acb, "reselect");
	/* Read reselected target id and lun from the chip */
	rsel_tar_lun_id = DC395x_read16(acb, TRM_S1040_SCSI_TARGETID);
	if (dcb) {		/* arbitration lost, but reselection won */
		srb = dcb->active_srb;
		if (!srb) {
			dprintkl(KERN_DEBUG, "reselect: Arb lost Resel won, "
				"but active_srb == NULL\n");
			DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
			return;
		}
		/* Requeue the command we were about to start */
		if (!acb->scan_devices) {
			dprintkdbg(DBG_KG, "reselect: (0x%p) <%02i-%i> "
				"Arb lost but Resel win rsel=%i stat=0x%04x\n",
				srb->cmd, dcb->target_id,
				dcb->target_lun, rsel_tar_lun_id,
				DC395x_read16(acb, TRM_S1040_SCSI_STATUS));
			srb->state = SRB_READY;
			free_tag(dcb, srb);
			list_move(&srb->list, &dcb->srb_waiting_list);
			waiting_set_timer(acb, HZ / 20);
		}
	}
	/* The upper byte should carry an IDENTIFY message */
	if (!(rsel_tar_lun_id & (IDENTIFY_BASE << 8)))
		dprintkl(KERN_DEBUG, "reselect: Expects identify msg. "
			"Got %i!\n", rsel_tar_lun_id);
	id = rsel_tar_lun_id & 0xff;
	lun = (rsel_tar_lun_id >> 8) & 7;
	dcb = find_dcb(acb, id, lun);
	if (!dcb) {
		dprintkl(KERN_ERR, "reselect: From non existent device "
			"<%02i-%i>\n", id, lun);
		DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
		return;
	}
	acb->active_dcb = dcb;

	if (!(dcb->dev_mode & NTC_DO_DISCONNECT))
		dprintkl(KERN_DEBUG, "reselect: in spite of forbidden "
			"disconnection? <%02i-%i>\n",
			dcb->target_id, dcb->target_lun);

	if (dcb->sync_mode & EN_TAG_QUEUEING) {
		/* Tagged device: the real SRB is selected later by the
		 * queue-tag message (msgin_qtag); run on tmp_srb now. */
		srb = acb->tmp_srb;
		dcb->active_srb = srb;
	} else {
		/* Untagged device: there can be only one command */
		srb = dcb->active_srb;
		if (!srb || !(srb->state & SRB_DISCONNECT)) {
			/* No disconnected command for this nexus:
			 * answer with ABORT via tmp_srb. */
			dprintkl(KERN_DEBUG,
				"reselect: w/o disconnected cmds <%02i-%i>\n",
				dcb->target_id, dcb->target_lun);
			srb = acb->tmp_srb;
			srb->state = SRB_UNEXPECT_RESEL;
			dcb->active_srb = srb;
			enable_msgout_abort(acb, srb);
		} else {
			if (dcb->flag & ABORT_DEV_) {
				/* device is marked for abort */
				enable_msgout_abort(acb, srb);
			} else
				srb->state = SRB_DATA_XFER;
		}
	}
	srb->scsi_phase = PH_BUS_FREE;	/* initial phase */

	/* Program host id, target id, offset and period for the nexus */
	dprintkdbg(DBG_0, "reselect: select <%i>\n", dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
	DC395x_write8(acb, TRM_S1040_SCSI_TARGETID, dcb->target_id);
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, dcb->sync_offset);
	DC395x_write8(acb, TRM_S1040_SCSI_SYNC, dcb->sync_period);
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
	/* SCSI command: accept the identify message */
	DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_MSGACCEPT);
}
3059
3060
/*
 * Return non-zero if the device whose INQUIRY vendor/product string
 * starts at @name must not use tagged queueing.  The table-driven
 * check is compiled out (#if 0), so normally nothing is blacklisted;
 * with DC395x_NO_TAGQ defined, everything is.
 */
static inline u8 tagq_blacklist(char *name)
{
#ifndef DC395x_NO_TAGQ
#if 0
	u8 i;
	for (i = 0; i < BADDEVCNT; i++)
		if (memcmp(name, DC395x_baddevname1[i], 28) == 0)
			return 1;
#endif
	return 0;
#else
	return 1;
#endif
}
3075
3076
/*
 * Enable tagged queueing for a device whose INQUIRY data advertises
 * command queueing, provided the user configuration allows it and the
 * device is not blacklisted; otherwise limit it to one command.
 */
static void disc_tagq_set(struct DeviceCtlBlk *dcb, struct ScsiInqData *ptr)
{
	/* Check SCSI format: ANSI version >= 2 or response data format 2 */
	if ((ptr->Vers & 0x07) >= 2 || (ptr->RDF & 0x0F) == 2) {
		if ((ptr->Flags & SCSI_INQ_CMDQUEUE)
		    && (dcb->dev_mode & NTC_DO_TAG_QUEUEING) &&
		    /* byte 8 onward is the vendor/product id string */
		    !tagq_blacklist(((char *)ptr) + 8)) {
			if (dcb->max_command == 1)
				dcb->max_command =
				    dcb->acb->tag_max_num;
			dcb->sync_mode |= EN_TAG_QUEUEING;
			/* untagged only */
		} else
			dcb->max_command = 1;
	}
}
3096
3097
3098static void add_dev(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3099 struct ScsiInqData *ptr)
3100{
3101 u8 bval1 = ptr->DevType & SCSI_DEVTYPE;
3102 dcb->dev_type = bval1;
3103
3104 disc_tagq_set(dcb, ptr);
3105}
3106
3107
3108
3109static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
3110{
3111 struct scsi_cmnd *cmd = srb->cmd;
3112 enum dma_data_direction dir = cmd->sc_data_direction;
3113
3114 if (scsi_sg_count(cmd) && dir != DMA_NONE) {
3115
3116 dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
3117 srb->sg_bus_addr, SEGMENTX_LEN);
3118 dma_unmap_single(&acb->dev->dev, srb->sg_bus_addr, SEGMENTX_LEN,
3119 DMA_TO_DEVICE);
3120 dprintkdbg(DBG_SG, "pci_unmap_srb: segs=%i buffer=%p\n",
3121 scsi_sg_count(cmd), scsi_bufflen(cmd));
3122
3123 scsi_dma_unmap(cmd);
3124 }
3125}
3126
3127
3128
/*
 * Undo the sense-buffer DMA mapping created by request_sense() and
 * restore the original first SG entry (which request_sense() saved in
 * the last SG slot) plus the original transfer length.  No-op unless
 * AUTO_REQSENSE is in progress.
 */
static void pci_unmap_srb_sense(struct AdapterCtlBlk *acb,
		struct ScsiReqBlk *srb)
{
	if (!(srb->flag & AUTO_REQSENSE))
		return;
	/* Unmap sense buffer */
	dprintkdbg(DBG_SG, "pci_unmap_srb_sense: buffer=%08x\n",
	       srb->segment_x[0].address);
	dma_unmap_single(&acb->dev->dev, srb->segment_x[0].address,
			 srb->segment_x[0].length, DMA_FROM_DEVICE);
	/* Restore the saved SG entry and transfer length */
	srb->total_xfer_length = srb->xferred;
	srb->segment_x[0].address =
	    srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address;
	srb->segment_x[0].length =
	    srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length;
}
3146
3147
3148
3149
3150
3151
3152static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
3153 struct ScsiReqBlk *srb)
3154{
3155 u8 tempcnt, status;
3156 struct scsi_cmnd *cmd = srb->cmd;
3157 enum dma_data_direction dir = cmd->sc_data_direction;
3158 int ckc_only = 1;
3159
3160 dprintkdbg(DBG_1, "srb_done: (0x%p) <%02i-%i>\n", srb->cmd,
3161 srb->cmd->device->id, (u8)srb->cmd->device->lun);
3162 dprintkdbg(DBG_SG, "srb_done: srb=%p sg=%i(%i/%i) buf=%p\n",
3163 srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
3164 scsi_sgtalbe(cmd));
3165 status = srb->target_status;
3166 set_host_byte(cmd, DID_OK);
3167 set_status_byte(cmd, SAM_STAT_GOOD);
3168 if (srb->flag & AUTO_REQSENSE) {
3169 dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
3170 pci_unmap_srb_sense(acb, srb);
3171
3172
3173
3174 srb->flag &= ~AUTO_REQSENSE;
3175 srb->adapter_status = 0;
3176 srb->target_status = SAM_STAT_CHECK_CONDITION;
3177 if (debug_enabled(DBG_1)) {
3178 switch (cmd->sense_buffer[2] & 0x0f) {
3179 case NOT_READY:
3180 dprintkl(KERN_DEBUG,
3181 "ReqSense: NOT_READY cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3182 cmd->cmnd[0], dcb->target_id,
3183 dcb->target_lun, status, acb->scan_devices);
3184 break;
3185 case UNIT_ATTENTION:
3186 dprintkl(KERN_DEBUG,
3187 "ReqSense: UNIT_ATTENTION cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3188 cmd->cmnd[0], dcb->target_id,
3189 dcb->target_lun, status, acb->scan_devices);
3190 break;
3191 case ILLEGAL_REQUEST:
3192 dprintkl(KERN_DEBUG,
3193 "ReqSense: ILLEGAL_REQUEST cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3194 cmd->cmnd[0], dcb->target_id,
3195 dcb->target_lun, status, acb->scan_devices);
3196 break;
3197 case MEDIUM_ERROR:
3198 dprintkl(KERN_DEBUG,
3199 "ReqSense: MEDIUM_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3200 cmd->cmnd[0], dcb->target_id,
3201 dcb->target_lun, status, acb->scan_devices);
3202 break;
3203 case HARDWARE_ERROR:
3204 dprintkl(KERN_DEBUG,
3205 "ReqSense: HARDWARE_ERROR cmnd=0x%02x <%02i-%i> stat=%i scan=%i ",
3206 cmd->cmnd[0], dcb->target_id,
3207 dcb->target_lun, status, acb->scan_devices);
3208 break;
3209 }
3210 if (cmd->sense_buffer[7] >= 6)
3211 printk("sense=0x%02x ASC=0x%02x ASCQ=0x%02x "
3212 "(0x%08x 0x%08x)\n",
3213 cmd->sense_buffer[2], cmd->sense_buffer[12],
3214 cmd->sense_buffer[13],
3215 *((unsigned int *)(cmd->sense_buffer + 3)),
3216 *((unsigned int *)(cmd->sense_buffer + 8)));
3217 else
3218 printk("sense=0x%02x No ASC/ASCQ (0x%08x)\n",
3219 cmd->sense_buffer[2],
3220 *((unsigned int *)(cmd->sense_buffer + 3)));
3221 }
3222
3223 if (status == SAM_STAT_CHECK_CONDITION) {
3224 set_host_byte(cmd, DID_BAD_TARGET);
3225 goto ckc_e;
3226 }
3227 dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");
3228
3229 set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
3230
3231 goto ckc_e;
3232 }
3233
3234
3235 if (status) {
3236
3237
3238
3239 if (status == SAM_STAT_CHECK_CONDITION) {
3240 request_sense(acb, dcb, srb);
3241 return;
3242 } else if (status == SAM_STAT_TASK_SET_FULL) {
3243 tempcnt = (u8)list_size(&dcb->srb_going_list);
3244 dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
3245 dcb->target_id, dcb->target_lun, tempcnt);
3246 if (tempcnt > 1)
3247 tempcnt--;
3248 dcb->max_command = tempcnt;
3249 free_tag(dcb, srb);
3250 list_move(&srb->list, &dcb->srb_waiting_list);
3251 waiting_set_timer(acb, HZ / 20);
3252 srb->adapter_status = 0;
3253 srb->target_status = 0;
3254 return;
3255 } else if (status == SCSI_STAT_SEL_TIMEOUT) {
3256 srb->adapter_status = H_SEL_TIMEOUT;
3257 srb->target_status = 0;
3258 set_host_byte(cmd, DID_NO_CONNECT);
3259 } else {
3260 srb->adapter_status = 0;
3261 set_host_byte(cmd, DID_ERROR);
3262 set_status_byte(cmd, status);
3263 }
3264 } else {
3265
3266
3267
3268 status = srb->adapter_status;
3269 if (status & H_OVER_UNDER_RUN) {
3270 srb->target_status = 0;
3271 scsi_msg_to_host_byte(cmd, srb->end_message);
3272 } else if (srb->status & PARITY_ERROR) {
3273 set_host_byte(cmd, DID_PARITY);
3274 } else {
3275
3276 srb->adapter_status = 0;
3277 srb->target_status = 0;
3278 }
3279 }
3280
3281 ckc_only = 0;
3282
3283 ckc_e:
3284
3285 pci_unmap_srb(acb, srb);
3286
3287 if (cmd->cmnd[0] == INQUIRY) {
3288 unsigned char *base = NULL;
3289 struct ScsiInqData *ptr;
3290 unsigned long flags = 0;
3291 struct scatterlist* sg = scsi_sglist(cmd);
3292 size_t offset = 0, len = sizeof(struct ScsiInqData);
3293
3294 local_irq_save(flags);
3295 base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len);
3296 ptr = (struct ScsiInqData *)(base + offset);
3297
3298 if (!ckc_only && get_host_byte(cmd) == DID_OK
3299 && cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
3300 && dir != DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
3301 dcb->inquiry7 = ptr->Flags;
3302
3303
3304
3305 if ((get_host_byte(cmd) == DID_OK) ||
3306 (get_status_byte(cmd) == SAM_STAT_CHECK_CONDITION)) {
3307 if (!dcb->init_tcq_flag) {
3308 add_dev(acb, dcb, ptr);
3309 dcb->init_tcq_flag = 1;
3310 }
3311 }
3312
3313 scsi_kunmap_atomic_sg(base);
3314 local_irq_restore(flags);
3315 }
3316
3317
3318 scsi_set_resid(cmd, srb->total_xfer_length);
3319
3320 cmd->SCp.this_residual = srb->total_xfer_length;
3321 cmd->SCp.buffers_residual = 0;
3322 if (debug_enabled(DBG_KG)) {
3323 if (srb->total_xfer_length)
3324 dprintkdbg(DBG_KG, "srb_done: (0x%p) <%02i-%i> "
3325 "cmnd=0x%02x Missed %i bytes\n",
3326 cmd, cmd->device->id, (u8)cmd->device->lun,
3327 cmd->cmnd[0], srb->total_xfer_length);
3328 }
3329
3330 if (srb != acb->tmp_srb) {
3331
3332 dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
3333 cmd, cmd->result);
3334 list_move_tail(&srb->list, &acb->srb_free_list);
3335 } else {
3336 dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");
3337 }
3338
3339 cmd->scsi_done(cmd);
3340 waiting_process_next(acb);
3341}
3342
3343
3344
3345static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
3346 struct scsi_cmnd *cmd, u8 force)
3347{
3348 struct DeviceCtlBlk *dcb;
3349 dprintkl(KERN_INFO, "doing_srb_done: pids ");
3350
3351 list_for_each_entry(dcb, &acb->dcb_list, list) {
3352 struct ScsiReqBlk *srb;
3353 struct ScsiReqBlk *tmp;
3354 struct scsi_cmnd *p;
3355
3356 list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
3357 p = srb->cmd;
3358 printk("G:%p(%02i-%i) ", p,
3359 p->device->id, (u8)p->device->lun);
3360 list_del(&srb->list);
3361 free_tag(dcb, srb);
3362 list_add_tail(&srb->list, &acb->srb_free_list);
3363 set_host_byte(p, did_flag);
3364 set_status_byte(p, SAM_STAT_GOOD);
3365 pci_unmap_srb_sense(acb, srb);
3366 pci_unmap_srb(acb, srb);
3367 if (force) {
3368
3369
3370 p->scsi_done(p);
3371 }
3372 }
3373 if (!list_empty(&dcb->srb_going_list))
3374 dprintkl(KERN_DEBUG,
3375 "How could the ML send cmnds to the Going queue? <%02i-%i>\n",
3376 dcb->target_id, dcb->target_lun);
3377 if (dcb->tag_mask)
3378 dprintkl(KERN_DEBUG,
3379 "tag_mask for <%02i-%i> should be empty, is %08x!\n",
3380 dcb->target_id, dcb->target_lun,
3381 dcb->tag_mask);
3382
3383
3384 list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
3385 p = srb->cmd;
3386
3387 printk("W:%p<%02i-%i>", p, p->device->id,
3388 (u8)p->device->lun);
3389 list_move_tail(&srb->list, &acb->srb_free_list);
3390 set_host_byte(p, did_flag);
3391 set_status_byte(p, SAM_STAT_GOOD);
3392 pci_unmap_srb_sense(acb, srb);
3393 pci_unmap_srb(acb, srb);
3394 if (force) {
3395
3396
3397 cmd->scsi_done(cmd);
3398 }
3399 }
3400 if (!list_empty(&dcb->srb_waiting_list))
3401 dprintkl(KERN_DEBUG, "ML queued %i cmnds again to <%02i-%i>\n",
3402 list_size(&dcb->srb_waiting_list), dcb->target_id,
3403 dcb->target_lun);
3404 dcb->flag &= ~ABORT_DEV_;
3405 }
3406 printk("\n");
3407}
3408
3409
/*
 * Assert SCSI bus reset and busy-wait until the chip reports the
 * reset interrupt.  RESET_DEV is flagged first so the interrupt
 * handling knows this reset was requested by us.
 */
static void reset_scsi_bus(struct AdapterCtlBlk *acb)
{
	dprintkdbg(DBG_0, "reset_scsi_bus: acb=%p\n", acb);
	acb->acb_flag |= RESET_DEV;
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);

	/* spin until the chip confirms the bus reset */
	while (!(DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS) & INT_SCSIRESET))
		;
}
3419
3420
/*
 * Program the TRM-S1040 with its basic operating configuration:
 * selection timeout, config0 (optionally with parity checking),
 * host id, zero sync offset, DMA FIFO behaviour and interrupt
 * enables.  Called at init and after a bus reset.
 */
static void set_basic_config(struct AdapterCtlBlk *acb)
{
	u8 bval;
	u16 wval;
	DC395x_write8(acb, TRM_S1040_SCSI_TIMEOUT, acb->sel_timeout);
	if (acb->config & HCC_PARITY)
		bval = PHASELATCH | INITIATOR | BLOCKRST | PARITYCHECK;
	else
		bval = PHASELATCH | INITIATOR | BLOCKRST;

	DC395x_write8(acb, TRM_S1040_SCSI_CONFIG0, bval);

	/* program configuration 1 */
	DC395x_write8(acb, TRM_S1040_SCSI_CONFIG1, 0x03);
	/* host id from the Scsi_Host */
	DC395x_write8(acb, TRM_S1040_SCSI_HOSTID, acb->scsi_host->this_id);
	/* clear sync offset */
	DC395x_write8(acb, TRM_S1040_SCSI_OFFSET, 0x00);
	/* general control: clear the top bit */
	wval = DC395x_read16(acb, TRM_S1040_GEN_CONTROL) & 0x7F;
	DC395x_write16(acb, TRM_S1040_GEN_CONTROL, wval);
	/* DMA config: half/half FIFO threshold + enhanced mode */
	wval = DC395x_read16(acb, TRM_S1040_DMA_CONFIG) & ~DMA_FIFO_CTRL;
	wval |=
	    DMA_FIFO_HALF_HALF | DMA_ENHANCE ;
	DC395x_write16(acb, TRM_S1040_DMA_CONFIG, wval);
	/* clear any pending interrupt status by reading it */
	DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
	/* enable SCSI and DMA interrupts */
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x7F);
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, EN_SCSIINTR | EN_DMAXFERERROR
		      );
}
3455
3456
/*
 * Handle a detected SCSI bus reset: stop the waiting timer, reset the
 * SCSI and DMA modules, reprogram the basic configuration, and -- if
 * the reset was not requested by us (RESET_DEV) -- fail all
 * outstanding commands with DID_RESET and restart the queue.
 */
static void scsi_reset_detect(struct AdapterCtlBlk *acb)
{
	dprintkl(KERN_INFO, "scsi_reset_detect: acb=%p\n", acb);

	if (timer_pending(&acb->waiting_timer))
		del_timer(&acb->waiting_timer);

	/* reset both the SCSI and the DMA modules of the chip */
	DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);

	udelay(500);
	/* suspend the queue for a while after the reset */
	acb->last_reset =
	    jiffies + 5 * HZ / 2 +
	    HZ * acb->eeprom.delay_time;

	clear_fifo(acb, "scsi_reset_detect");
	set_basic_config(acb);

	if (acb->acb_flag & RESET_DEV) {
		/* reset was requested by us (see reset_scsi_bus) */
		acb->acb_flag |= RESET_DONE;
	} else {
		/* third-party reset: fail everything and start over */
		acb->acb_flag |= RESET_DETECT;
		reset_dev_param(acb);
		doing_srb_done(acb, DID_RESET, NULL, 1);

		acb->active_dcb = NULL;
		acb->acb_flag = 0;
		waiting_process_next(acb);
	}
}
3490
3491
/*
 * Issue an automatic REQUEST SENSE for a command that returned CHECK
 * CONDITION.  The first SG entry is parked in the last SG slot so
 * pci_unmap_srb_sense() can restore it later, and is replaced by a
 * DMA mapping of the midlayer sense buffer; the SRB is then restarted.
 */
static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
		struct ScsiReqBlk *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	dprintkdbg(DBG_1, "request_sense: (0x%p) <%02i-%i>\n",
		cmd, cmd->device->id, (u8)cmd->device->lun);

	srb->flag |= AUTO_REQSENSE;
	srb->adapter_status = 0;
	srb->target_status = 0;

	/* clear stale sense data before the transfer */
	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

	/* Save the first SG entry in the last slot and remember the
	 * original transfer length (restored by pci_unmap_srb_sense). */
	srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].address =
	    srb->segment_x[0].address;
	srb->segment_x[DC395x_MAX_SG_LISTENTRY - 1].length =
	    srb->segment_x[0].length;
	srb->xferred = srb->total_xfer_length;
	/* the sense transfer replaces the data transfer */
	srb->total_xfer_length = SCSI_SENSE_BUFFERSIZE;
	srb->segment_x[0].length = SCSI_SENSE_BUFFERSIZE;
	/* Map the sense buffer for DMA */
	srb->segment_x[0].address = dma_map_single(&acb->dev->dev,
			cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
			DMA_FROM_DEVICE);
	dprintkdbg(DBG_SG, "request_sense: map buffer %p->%08x(%05x)\n",
		cmd->sense_buffer, srb->segment_x[0].address,
		SCSI_SENSE_BUFFERSIZE);
	srb->sg_count = 1;
	srb->sg_index = 0;

	if (start_scsi(acb, dcb, srb)) {
		/* couldn't start now: requeue and retry shortly */
		dprintkl(KERN_DEBUG,
			"request_sense: (0x%p) failed <%02i-%i>\n",
			srb->cmd, dcb->target_id, dcb->target_lun);
		list_move(&srb->list, &dcb->srb_waiting_list);
		waiting_set_timer(acb, HZ / 100);
	}
}
3533
3534
3535
3536
3537
3538
3539
3540
3541
3542
3543
3544
3545
3546
3547
/*
 * Allocate and initialise a DeviceCtlBlk for the given target/lun.
 * Negotiation defaults come from the adapter EEPROM; for LUNs other
 * than 0 the already-negotiated parameters are copied from an existing
 * DCB of the same target.  Returns NULL on allocation failure.
 */
static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
		u8 target, u8 lun)
{
	struct NvRamType *eeprom = &acb->eeprom;
	u8 period_index = eeprom->target[target].period & 0x07;
	struct DeviceCtlBlk *dcb;

	dcb = kmalloc(sizeof(struct DeviceCtlBlk), GFP_ATOMIC);
	dprintkdbg(DBG_0, "device_alloc: <%02i-%i>\n", target, lun);
	if (!dcb)
		return NULL;
	dcb->acb = NULL;
	INIT_LIST_HEAD(&dcb->srb_going_list);
	INIT_LIST_HEAD(&dcb->srb_waiting_list);
	dcb->active_srb = NULL;
	dcb->tag_mask = 0;
	dcb->max_command = 1;
	dcb->target_id = target;
	dcb->target_lun = lun;
	dcb->dev_mode = eeprom->target[target].cfg0;
#ifndef DC395x_NO_DISCONNECT
	dcb->identify_msg =
	    IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
#else
	dcb->identify_msg = IDENTIFY(0, lun);
#endif
	dcb->inquiry7 = 0;
	dcb->sync_mode = 0;
	dcb->min_nego_period = clock_period[period_index];
	dcb->sync_period = 0;
	dcb->sync_offset = 0;
	dcb->flag = 0;

#ifndef DC395x_NO_WIDE
	if ((dcb->dev_mode & NTC_DO_WIDE_NEGO)
	    && (acb->config & HCC_WIDE_CARD))
		dcb->sync_mode |= WIDE_NEGO_ENABLE;
#endif
#ifndef DC395x_NO_SYNC
	if (dcb->dev_mode & NTC_DO_SYNC_NEGO)
		if (!(lun) || current_sync_offset)
			dcb->sync_mode |= SYNC_NEGO_ENABLE;
#endif
	if (dcb->target_lun != 0) {
		/* Copy settings from another LUN of this target */
		struct DeviceCtlBlk *p;
		list_for_each_entry(p, &acb->dcb_list, list)
			if (p->target_id == dcb->target_id)
				break;
		/* NOTE(review): if no DCB with this target_id exists the
		 * loop cursor 'p' is left pointing at the list head and
		 * the copy below reads bogus memory -- presumably LUN 0
		 * is always allocated first; verify against callers. */
		dprintkdbg(DBG_1,
			"device_alloc: <%02i-%i> copy from <%02i-%i>\n",
			dcb->target_id, dcb->target_lun,
			p->target_id, p->target_lun);
		dcb->sync_mode = p->sync_mode;
		dcb->sync_period = p->sync_period;
		dcb->min_nego_period = p->min_nego_period;
		dcb->sync_offset = p->sync_offset;
		dcb->inquiry7 = p->inquiry7;
	}
	return dcb;
}
3609
3610
3611
3612
3613
3614
3615
3616
/*
 * Attach a freshly allocated DCB to the adapter: link it into the
 * dcb_list (it becomes the round-robin start point if the list was
 * empty) and record it in the dcb_map bitmask and children table.
 */
static void adapter_add_device(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb)
{
	/* backpointer to the adapter */
	dcb->acb = acb;

	/* the emptiness check must come before list_add_tail below */
	if (list_empty(&acb->dcb_list))
		acb->dcb_run_robin = dcb;

	/* add the device to the list */
	list_add_tail(&dcb->list, &acb->dcb_list);

	/* update the map and the lookup table the adapter keeps */
	acb->dcb_map[dcb->target_id] |= (1 << dcb->target_lun);
	acb->children[dcb->target_id][dcb->target_lun] = dcb;
}
3634
3635
3636
3637
3638
3639
3640
3641
3642
3643
3644
/*
 * Detach a DCB from the adapter: clear any adapter references to it
 * (active device, round-robin cursor), unlink it from dcb_list and
 * clear its entries in the dcb_map bitmask and children table.
 * The DCB itself is not freed here.
 */
static void adapter_remove_device(struct AdapterCtlBlk *acb,
		struct DeviceCtlBlk *dcb)
{
	struct DeviceCtlBlk *i;
	struct DeviceCtlBlk *tmp;
	dprintkdbg(DBG_0, "adapter_remove_device: <%02i-%i>\n",
		dcb->target_id, dcb->target_lun);

	/* drop adapter pointers to this device */
	if (acb->active_dcb == dcb)
		acb->active_dcb = NULL;
	if (acb->dcb_run_robin == dcb)
		acb->dcb_run_robin = dcb_get_next(&acb->dcb_list, dcb);

	/* unlink it from the adapter's device list */
	list_for_each_entry_safe(i, tmp, &acb->dcb_list, list)
		if (dcb == i) {
			list_del(&i->list);
			break;
		}

	/* clear the map entries and the backpointer */
	acb->dcb_map[dcb->target_id] &= ~(1 << dcb->target_lun);
	acb->children[dcb->target_id][dcb->target_lun] = NULL;
	dcb->acb = NULL;
}
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680static void adapter_remove_and_free_device(struct AdapterCtlBlk *acb,
3681 struct DeviceCtlBlk *dcb)
3682{
3683 if (list_size(&dcb->srb_going_list) > 1) {
3684 dprintkdbg(DBG_1, "adapter_remove_and_free_device: <%02i-%i> "
3685 "Won't remove because of %i active requests.\n",
3686 dcb->target_id, dcb->target_lun,
3687 list_size(&dcb->srb_going_list));
3688 return;
3689 }
3690 adapter_remove_device(acb, dcb);
3691 kfree(dcb);
3692}
3693
3694
3695
3696
3697
3698
3699
3700
/*
 * Detach and free every DCB known to the adapter.  Uses the _safe
 * iterator because adapter_remove_and_free_device unlinks entries
 * while we walk the list.
 */
static void adapter_remove_and_free_all_devices(struct AdapterCtlBlk* acb)
{
	struct DeviceCtlBlk *dcb;
	struct DeviceCtlBlk *tmp;
	dprintkdbg(DBG_1, "adapter_remove_and_free_all_devices: num=%i\n",
		list_size(&acb->dcb_list));

	list_for_each_entry_safe(dcb, tmp, &acb->dcb_list, list)
		adapter_remove_and_free_device(acb, dcb);
}
3711
3712
3713
3714
3715
3716
3717
3718
3719
3720static int dc395x_slave_alloc(struct scsi_device *scsi_device)
3721{
3722 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
3723 struct DeviceCtlBlk *dcb;
3724
3725 dcb = device_alloc(acb, scsi_device->id, scsi_device->lun);
3726 if (!dcb)
3727 return -ENOMEM;
3728 adapter_add_device(acb, dcb);
3729
3730 return 0;
3731}
3732
3733
3734
3735
3736
3737
3738
3739
3740static void dc395x_slave_destroy(struct scsi_device *scsi_device)
3741{
3742 struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)scsi_device->host->hostdata;
3743 struct DeviceCtlBlk *dcb = find_dcb(acb, scsi_device->id, scsi_device->lun);
3744 if (dcb)
3745 adapter_remove_and_free_device(acb, dcb);
3746}
3747
3748
3749
3750
3751
3752
3753
3754
3755
3756
3757
/*
 * Short delay between EEPROM bit-bang transitions: program the chip's
 * general-purpose timer with 5 and spin until GTIMEOUT is set (the
 * function name says this amounts to roughly 30 us).
 */
static void trms1040_wait_30us(unsigned long io_port)
{
	outb(5, io_port + TRM_S1040_GEN_TIMER);
	while (!(inb(io_port + TRM_S1040_GEN_STATUS) & GTIMEOUT))
		;
}
3765
3766
3767
3768
3769
3770
3771
3772
3773
3774
/*
 * Bit-bang a serial EEPROM opcode and address through the chip's
 * NVRAM register: 3 command bits (MSB first, taken from bit 2 of
 * @cmd) followed by 7 address bits (MSB first, from bit 6 of @addr),
 * each clocked by raising NVR_CLOCK while the chip stays selected.
 */
static void trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
{
	int i;
	u8 send_data;

	/* send the 3 command bits, MSB first */
	for (i = 0; i < 3; i++, cmd <<= 1) {
		send_data = NVR_SELECT;
		if (cmd & 0x04)	/* current MSB of the 3-bit command */
			send_data |= NVR_BITOUT;

		outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		outb((send_data | NVR_CLOCK),
		     io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
	}

	/* send the 7 address bits, MSB first */
	for (i = 0; i < 7; i++, addr <<= 1) {
		send_data = NVR_SELECT;
		if (addr & 0x40)	/* current MSB of the 7-bit address */
			send_data |= NVR_BITOUT;

		outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		outb((send_data | NVR_CLOCK),
		     io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
	}
	/* leave the chip selected with the clock low */
	outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);
}
3808
3809
3810
3811
3812
3813
3814
3815
3816
3817
3818
3819
/*
 * Write one byte to the serial EEPROM: issue the write opcode (0x05)
 * with the address, clock out the 8 data bits MSB first, deselect,
 * then poll NVR_BITIN until the device signals the write finished.
 */
static void trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
{
	int i;
	u8 send_data;

	/* send the write command plus address */
	trms1040_write_cmd(io_port, 0x05, addr);

	/* clock out the 8 data bits, MSB first */
	for (i = 0; i < 8; i++, byte <<= 1) {
		send_data = NVR_SELECT;
		if (byte & 0x80)	/* current MSB of the data byte */
			send_data |= NVR_BITOUT;

		outb(send_data, io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		outb((send_data | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
	}
	outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* deselect and reselect the chip to start the write cycle */
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* clock until the EEPROM reports the write is complete */
	while (1) {
		outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);

		outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);

		if (inb(io_port + TRM_S1040_GEN_NVRAM) & NVR_BITIN)
			break;
	}

	/* deselect the chip */
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
}
3864
3865
3866
3867
3868
3869
3870
3871
3872
3873
/*
 * trms1040_write_all - write the whole 128-byte eeprom image to the device
 * @eeprom:  in-memory eeprom image to program (treated as 128 raw bytes)
 * @io_port: base I/O address of the TRM-S1040 chip
 *
 * Enables EEPROM access on the chip, issues the write-enable command,
 * programs all 128 bytes one at a time, issues write-disable and finally
 * turns EEPROM access back off.
 */
static void trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port)
{
	u8 *b_eeprom = (u8 *)eeprom;
	u8 addr;

	/* enable SEEPROM access through the general control register */
	outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
	     io_port + TRM_S1040_GEN_CONTROL);

	/* write enable (command 0x04, address 0xFF) */
	trms1040_write_cmd(io_port, 0x04, 0xFF);
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* write the full 128-byte image, one byte per address */
	for (addr = 0; addr < 128; addr++, b_eeprom++)
		trms1040_set_data(io_port, addr, *b_eeprom);

	/* write disable (command 0x04, address 0x00) */
	trms1040_write_cmd(io_port, 0x04, 0x00);
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	trms1040_wait_30us(io_port);

	/* disable SEEPROM access again */
	outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
	     io_port + TRM_S1040_GEN_CONTROL);
}
3901
3902
3903
3904
3905
3906
3907
3908
3909
3910
3911
3912
3913
/*
 * trms1040_get_data - read a single byte from the serial EEPROM
 * @io_port: base I/O address of the TRM-S1040 chip
 * @addr:    EEPROM address to read
 *
 * Issues the read command (0x06) for @addr and clocks in 8 bits,
 * MSB first, from NVR_BITIN.
 *
 * Returns the byte read.
 */
static u8 trms1040_get_data(unsigned long io_port, u8 addr)
{
	int i;
	u8 read_byte;
	u8 result = 0;

	/* send read command & address */
	trms1040_write_cmd(io_port, 0x06, addr);

	/* clock in 8 data bits, MSB first */
	for (i = 0; i < 8; i++) {
		outb((NVR_SELECT | NVR_CLOCK), io_port + TRM_S1040_GEN_NVRAM);
		trms1040_wait_30us(io_port);
		outb(NVR_SELECT, io_port + TRM_S1040_GEN_NVRAM);

		/* sample the data-in line and shift it into the result */
		read_byte = inb(io_port + TRM_S1040_GEN_NVRAM);
		result <<= 1;
		if (read_byte & NVR_BITIN)
			result |= 1;

		trms1040_wait_30us(io_port);
	}

	/* deselect the chip */
	outb(0, io_port + TRM_S1040_GEN_NVRAM);
	return result;
}
3942
3943
3944
3945
3946
3947
3948
3949
3950
3951
/*
 * trms1040_read_all - read the whole 128-byte eeprom into memory
 * @eeprom:  buffer to fill (treated as 128 raw bytes)
 * @io_port: base I/O address of the TRM-S1040 chip
 *
 * Enables EEPROM access, reads all 128 bytes one at a time, then
 * disables EEPROM access again.
 */
static void trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
{
	u8 *b_eeprom = (u8 *)eeprom;
	u8 addr;

	/* enable SEEPROM access through the general control register */
	outb((inb(io_port + TRM_S1040_GEN_CONTROL) | EN_EEPROM),
	     io_port + TRM_S1040_GEN_CONTROL);

	/* read the full 128-byte image, one byte per address */
	for (addr = 0; addr < 128; addr++, b_eeprom++)
		*b_eeprom = trms1040_get_data(io_port, addr);

	/* disable SEEPROM access again */
	outb((inb(io_port + TRM_S1040_GEN_CONTROL) & ~EN_EEPROM),
	     io_port + TRM_S1040_GEN_CONTROL);
}
3969
3970
3971
3972
3973
3974
3975
3976
3977
3978
3979
3980
3981
/*
 * check_eeprom - validate the eeprom contents, falling back to defaults
 * @eeprom:  buffer that receives the eeprom image
 * @io_port: base I/O address of the TRM-S1040 chip
 *
 * Reads the eeprom and verifies that the 16-bit word sum over all 64
 * words equals the magic value 0x1234.  If it does not, a default image
 * is constructed in @eeprom, the checksum word is recomputed so the sum
 * is again 0x1234, and the image is written back to the device.  In
 * both cases the module-parameter overrides are then applied.
 */
static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
{
	u16 *w_eeprom = (u16 *)eeprom;
	u16 w_addr;
	u16 cksum;
	u32 d_addr;
	u32 *d_eeprom;

	trms1040_read_all(eeprom, io_port);	/* read eeprom */

	/* sum all 64 16-bit words; a valid image sums to 0x1234 */
	cksum = 0;
	for (w_addr = 0, w_eeprom = (u16 *)eeprom; w_addr < 64;
	     w_addr++, w_eeprom++)
		cksum += *w_eeprom;
	if (cksum != 0x1234) {
		/*
		 * Checksum is wrong.
		 * Load a set of defaults into the eeprom buffer instead.
		 */
		dprintkl(KERN_WARNING,
			"EEProm checksum error: using default values and options.\n");
		eeprom->sub_vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
		eeprom->sub_vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
		eeprom->sub_sys_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
		eeprom->sub_sys_id[1] =
		    (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
		eeprom->sub_class = 0x00;
		eeprom->vendor_id[0] = (u8)PCI_VENDOR_ID_TEKRAM;
		eeprom->vendor_id[1] = (u8)(PCI_VENDOR_ID_TEKRAM >> 8);
		eeprom->device_id[0] = (u8)PCI_DEVICE_ID_TEKRAM_TRMS1040;
		eeprom->device_id[1] =
		    (u8)(PCI_DEVICE_ID_TEKRAM_TRMS1040 >> 8);
		eeprom->reserved = 0x00;

		/* fill the per-target area, 32 bits at a time
		 * (0x77 presumably is the default target cfg — TODO
		 * confirm against struct NvRamType's layout) */
		for (d_addr = 0, d_eeprom = (u32 *)eeprom->target;
		     d_addr < 16; d_addr++, d_eeprom++)
			*d_eeprom = 0x00000077;

		/* adapter settings and zero padding following the target
		 * area; magic values match the NvRamType layout */
		*d_eeprom++ = 0x04000F07;
		*d_eeprom++ = 0x00000015;
		for (d_addr = 0; d_addr < 12; d_addr++, d_eeprom++)
			*d_eeprom = 0x00;

		/* apply the module-parameter overrides on top */
		set_safe_settings();
		fix_settings();
		eeprom_override(eeprom);

		/* recompute the checksum word so the sum is 0x1234 again */
		eeprom->cksum = 0x00;
		for (w_addr = 0, cksum = 0, w_eeprom = (u16 *)eeprom;
		     w_addr < 63; w_addr++, w_eeprom++)
			cksum += *w_eeprom;

		*w_eeprom = 0x1234 - cksum;
		trms1040_write_all(eeprom, io_port);	/* write back defaults */
		eeprom->delay_time = cfg_data[CFG_RESET_DELAY].value;
	} else {
		set_safe_settings();
		eeprom_index_to_delay(eeprom);
		eeprom_override(eeprom);
	}
}
4044
4045
4046
4047
4048
4049
4050
4051
/*
 * print_eeprom_settings - log the settings taken from the eeprom
 * @eeprom: eeprom image the adapter will be configured from
 *
 * Prints the adapter SCSI id, the (target 0) sync period index and the
 * clock speed it maps to, plus the adapter mode, tag depth and reset
 * delay.
 */
static void print_eeprom_settings(struct NvRamType *eeprom)
{
	dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n",
		eeprom->scsi_id,
		eeprom->target[0].period,
		clock_speed[eeprom->target[0].period] / 10,
		clock_speed[eeprom->target[0].period] % 10,
		eeprom->target[0].cfg0);
	dprintkl(KERN_INFO, "               AdaptMode=0x%02x, Tags=%i(%02i), DelayReset=%is\n",
		eeprom->channel_cfg, eeprom->max_tag,
		1 << eeprom->max_tag, eeprom->delay_time);
}
4064
4065
4066
4067static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
4068{
4069 int i;
4070 const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
4071
4072 for (i = 0; i < DC395x_MAX_SRB_CNT; i += srbs_per_page)
4073 kfree(acb->srb_array[i].segment_x);
4074}
4075
4076
4077
4078
4079
/*
 * adapter_sg_tables_alloc - allocate SG tables for all SRBs
 * @acb: adapter the tables are allocated for
 *
 * Allocates enough whole pages to hold one SG table (SEGMENTX_LEN bytes)
 * for each of the DC395x_MAX_SRB_CNT SRBs plus one extra table for the
 * temporary SRB (acb->srb).  srbs_per_page tables are carved out of each
 * page; only the first SRB of a page stores the kmalloc()ed pointer
 * (adapter_sg_tables_free() relies on that layout).
 *
 * Returns 0 on success, 1 if an allocation failed (in which case all
 * pages allocated so far are freed again).
 */
static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
{
	const unsigned mem_needed = (DC395x_MAX_SRB_CNT+1)
	                            *SEGMENTX_LEN;
	int pages = (mem_needed+(PAGE_SIZE-1))/PAGE_SIZE;	/* round up */
	const unsigned srbs_per_page = PAGE_SIZE/SEGMENTX_LEN;
	int srb_idx = 0;
	unsigned i = 0;
	struct SGentry *ptr;

	/* start from a clean slate so a partial failure can be unwound */
	for (i = 0; i < DC395x_MAX_SRB_CNT; i++)
		acb->srb_array[i].segment_x = NULL;

	dprintkdbg(DBG_1, "Allocate %i pages for SG tables\n", pages);
	while (pages--) {
		ptr = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (!ptr) {
			adapter_sg_tables_free(acb);
			return 1;
		}
		dprintkdbg(DBG_1, "Allocate %li bytes at %p for SG segments %i\n",
			PAGE_SIZE, ptr, srb_idx);
		i = 0;
		/* hand out srbs_per_page tables from this page */
		while (i < srbs_per_page && srb_idx < DC395x_MAX_SRB_CNT)
			acb->srb_array[srb_idx++].segment_x =
			    ptr + (i++ * DC395x_MAX_SG_LISTENTRY);
	}
	/* the last page should have one slot left over for the tmp SRB
	 * (acb->srb); ptr still points at the last page allocated */
	if (i < srbs_per_page)
		acb->srb.segment_x =
		    ptr + (i * DC395x_MAX_SG_LISTENTRY);
	else
		dprintkl(KERN_DEBUG, "No space for tmsrb SG table reserved?!\n");
	return 0;
}
4114
4115
4116
4117
4118
4119
4120
4121
4122
4123
4124
4125
/*
 * adapter_print_config - log the board's connector and termination state
 * @acb: adapter to report on
 *
 * Reads the general status register to report which connectors are in
 * use (the CONxx bits are active-low) and the general control register
 * to report the termination configuration.
 */
static void adapter_print_config(struct AdapterCtlBlk *acb)
{
	u8 bval;

	bval = DC395x_read8(acb, TRM_S1040_GEN_STATUS);
	dprintkl(KERN_INFO, "%sConnectors: ",
		((bval & WIDESCSI) ? "(Wide) " : ""));
	/* connector-present bits are active low */
	if (!(bval & CON5068))
		printk("ext%s ", !(bval & EXT68HIGH) ? "68" : "50");
	if (!(bval & CON68))
		printk("int68%s ", !(bval & INT68HIGH) ? "" : "(50)");
	if (!(bval & CON50))
		printk("int50 ");
	/* all three "present" at once should be impossible */
	if ((bval & (CON5068 | CON50 | CON68)) ==
	    0 /*(CON5068 | CON50 | CON68)*/)
		printk(" Oops! (All 3?) ");
	bval = DC395x_read8(acb, TRM_S1040_GEN_CONTROL);
	printk(" Termination: ");
	if (bval & DIS_TERM)
		printk("Disabled\n");
	else {
		if (bval & AUTOTERM)
			printk("Auto ");
		if (bval & LOW8TERM)
			printk("Low ");
		if (bval & UP8TERM)
			printk("High ");
		printk("\n");
	}
}
4156
4157
4158
4159
4160
4161
4162
4163
4164
4165
4166
4167
4168
4169
/*
 * adapter_init_params - initialise the adapter control block fields
 * @acb: adapter control block to set up
 *
 * Fills in the software state of the ACB from the (already read) eeprom
 * image: list heads, timers, SRB accounting, tag depth, host id and the
 * free-SRB list.  Must run after check_eeprom() has populated
 * acb->eeprom and before the chip itself is initialised.
 */
static void adapter_init_params(struct AdapterCtlBlk *acb)
{
	struct NvRamType *eeprom = &acb->eeprom;
	int i;

	/* NOTE: acb->scsi_host is set at scsi_host_alloc() time   */
	/* NOTE: acb->io_port_base/io_port_len are set when the I/O */
	/*       region is registered in adapter_init()             */

	INIT_LIST_HEAD(&acb->dcb_list);
	acb->dcb_run_robin = NULL;
	acb->active_dcb = NULL;

	INIT_LIST_HEAD(&acb->srb_free_list);
	/* temporary SRB used for internally generated requests */
	acb->tmp_srb = &acb->srb;
	timer_setup(&acb->waiting_timer, waiting_timeout, 0);
	/* NOTE(review): no callback is installed here — presumably the
	 * selto timer's function is assigned before it is armed; confirm */
	timer_setup(&acb->selto_timer, NULL, 0);

	acb->srb_count = DC395x_MAX_SRB_CNT;

	acb->sel_timeout = DC395x_SEL_TIMEOUT;

	/* tag queueing depth, capped at 30 */
	acb->tag_max_num = 1 << eeprom->max_tag;
	if (acb->tag_max_num > 30)
		acb->tag_max_num = 30;

	acb->acb_flag = 0;	/* RESET_DETECT, RESET_DONE, RESET_DEV */
	acb->gmode2 = eeprom->channel_cfg;
	acb->config = 0;	/* filled in by adapter_init_chip() */

	if (eeprom->channel_cfg & NAC_SCANLUN)
		acb->lun_chk = 1;
	acb->scan_devices = 1;

	acb->scsi_host->this_id = eeprom->scsi_id;
	acb->hostid_bit = (1 << acb->scsi_host->this_id);

	for (i = 0; i < DC395x_MAX_SCSI_ID; i++)
		acb->dcb_map[i] = 0;

	acb->msg_len = 0;

	/* link the static SRB array into the free list
	 * NOTE(review): only srb_count - 1 entries are linked — the last
	 * array element is left out; confirm this is intentional */
	for (i = 0; i < acb->srb_count - 1; i++)
		list_add_tail(&acb->srb_array[i].list, &acb->srb_free_list);
}
4218
4219
4220
4221
4222
4223
4224
4225
4226
4227
4228
4229
4230
4231
/*
 * adapter_init_scsi_host - fill in the scsi host structure
 * @host: scsi host to set up
 *
 * Copies the adapter's resources (ports, irq) and eeprom-derived limits
 * (host id, max id/lun) into the Scsi_Host.  Must run after
 * adapter_init() has claimed the I/O region and IRQ.
 */
static void adapter_init_scsi_host(struct Scsi_Host *host)
{
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
	struct NvRamType *eeprom = &acb->eeprom;

	host->max_cmd_len = 24;
	host->can_queue = DC395x_MAX_CMD_QUEUE;
	host->cmd_per_lun = DC395x_MAX_CMD_PER_LUN;
	host->this_id = (int)eeprom->scsi_id;
	host->io_port = acb->io_port_base;
	host->n_io_port = acb->io_port_len;
	host->dma_channel = -1;	/* PCI card: no ISA DMA channel */
	host->unique_id = acb->io_port_base;
	host->irq = acb->irq_level;
	acb->last_reset = jiffies;

	host->max_id = 16;
	/* don't scan our own id */
	if (host->max_id - 1 == eeprom->scsi_id)
		host->max_id--;

	if (eeprom->channel_cfg & NAC_SCANLUN)
		host->max_lun = 8;
	else
		host->max_lun = 1;
}
4257
4258
4259
4260
4261
4262
4263
4264
4265
4266
4267
/*
 * adapter_init_chip - bring the TRM-S1040 chip into a known state
 * @acb: adapter to initialise
 *
 * Disables all interrupts, resets the SCSI and DMA modules, records the
 * board capabilities (wide, parity, autoterm) in acb->config, and — if
 * the eeprom asks for it — performs an initial SCSI bus reset and sets
 * last_reset so commands are held off for the configured delay.
 */
static void adapter_init_chip(struct AdapterCtlBlk *acb)
{
	struct NvRamType *eeprom = &acb->eeprom;

	/* disable all interrupts while we reset things */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0x00);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0x00);

	/* reset the scsi module */
	DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_RSTMODULE);

	/* reset the dma module */
	DC395x_write8(acb, TRM_S1040_DMA_CONTROL, DMARESETMODULE);
	udelay(20);

	/* program configuration: autoterm and parity always on */
	acb->config = HCC_AUTOTERM | HCC_PARITY;
	if (DC395x_read8(acb, TRM_S1040_GEN_STATUS) & WIDESCSI)
		acb->config |= HCC_WIDE_CARD;

	if (eeprom->channel_cfg & NAC_POWERON_SCSI_RESET)
		acb->config |= HCC_SCSI_RESET;

	if (acb->config & HCC_SCSI_RESET) {
		dprintkl(KERN_INFO, "Performing initial SCSI bus reset\n");
		DC395x_write8(acb, TRM_S1040_SCSI_CONTROL, DO_RSTSCSI);

		/* let the reset settle before touching the bus again */
		udelay(500);

		/* hold off commands for the configured post-reset delay */
		acb->last_reset =
		    jiffies + HZ / 2 +
		    HZ * acb->eeprom.delay_time;

	}
}
4306
4307
4308
4309
4310
4311
4312
4313
4314
4315
4316
4317
4318
4319
4320
4321
4322static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port,
4323 u32 io_port_len, unsigned int irq)
4324{
4325 if (!request_region(io_port, io_port_len, DC395X_NAME)) {
4326 dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port);
4327 goto failed;
4328 }
4329
4330 acb->io_port_base = io_port;
4331 acb->io_port_len = io_port_len;
4332
4333 if (request_irq(irq, dc395x_interrupt, IRQF_SHARED, DC395X_NAME, acb)) {
4334
4335 dprintkl(KERN_INFO, "Failed to register IRQ\n");
4336 goto failed;
4337 }
4338
4339 acb->irq_level = irq;
4340
4341
4342 check_eeprom(&acb->eeprom, io_port);
4343 print_eeprom_settings(&acb->eeprom);
4344
4345
4346 adapter_init_params(acb);
4347
4348
4349 adapter_print_config(acb);
4350
4351 if (adapter_sg_tables_alloc(acb)) {
4352 dprintkl(KERN_DEBUG, "Memory allocation for SG tables failed\n");
4353 goto failed;
4354 }
4355 adapter_init_scsi_host(acb->scsi_host);
4356 adapter_init_chip(acb);
4357 set_basic_config(acb);
4358
4359 dprintkdbg(DBG_0,
4360 "adapter_init: acb=%p, pdcb_map=%p psrb_array=%p "
4361 "size{acb=0x%04x dcb=0x%04x srb=0x%04x}\n",
4362 acb, acb->dcb_map, acb->srb_array, sizeof(struct AdapterCtlBlk),
4363 sizeof(struct DeviceCtlBlk), sizeof(struct ScsiReqBlk));
4364 return 0;
4365
4366failed:
4367 if (acb->irq_level)
4368 free_irq(acb->irq_level, acb);
4369 if (acb->io_port_base)
4370 release_region(acb->io_port_base, acb->io_port_len);
4371 adapter_sg_tables_free(acb);
4372
4373 return 1;
4374}
4375
4376
4377
4378
4379
4380
4381
4382
4383
/*
 * adapter_uninit_chip - quiesce the TRM-S1040 chip
 * @acb: adapter to shut down
 *
 * Disables all DMA and SCSI interrupts, resets the SCSI bus if this
 * host performed the initial reset, and reads the interrupt status
 * register once to clear any pending interrupt condition.
 */
static void adapter_uninit_chip(struct AdapterCtlBlk *acb)
{
	/* disable interrupts */
	DC395x_write8(acb, TRM_S1040_DMA_INTEN, 0);
	DC395x_write8(acb, TRM_S1040_SCSI_INTEN, 0);

	/* reset the scsi bus */
	if (acb->config & HCC_SCSI_RESET)
		reset_scsi_bus(acb);

	/* clear any pending interrupt state */
	DC395x_read8(acb, TRM_S1040_SCSI_INTSTATUS);
}
4397
4398
4399
4400
4401
4402
4403
4404
4405
4406
4407static void adapter_uninit(struct AdapterCtlBlk *acb)
4408{
4409 unsigned long flags;
4410 DC395x_LOCK_IO(acb->scsi_host, flags);
4411
4412
4413 if (timer_pending(&acb->waiting_timer))
4414 del_timer(&acb->waiting_timer);
4415 if (timer_pending(&acb->selto_timer))
4416 del_timer(&acb->selto_timer);
4417
4418 adapter_uninit_chip(acb);
4419 adapter_remove_and_free_all_devices(acb);
4420 DC395x_UNLOCK_IO(acb->scsi_host, flags);
4421
4422 if (acb->irq_level)
4423 free_irq(acb->irq_level, acb);
4424 if (acb->io_port_base)
4425 release_region(acb->io_port_base, acb->io_port_len);
4426
4427 adapter_sg_tables_free(acb);
4428}
4429
4430
#undef YESNO
/* emit a fixed-width Yes/No column for the per-device table below */
#define YESNO(YN) \
 if (YN) seq_printf(m, " Yes ");\
 else seq_printf(m, " No  ")

/*
 * dc395x_show_info - /proc/scsi show handler
 * @m:    seq_file to print into
 * @host: host being reported on
 *
 * Prints the adapter configuration, a table with one row per attached
 * device (DCB) including negotiated sync/wide parameters, and the
 * current waiting/going queues.  All state is sampled under the host
 * lock.  Always returns 0.
 */
static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
	int spd, spd1;
	struct DeviceCtlBlk *dcb;
	unsigned long flags;
	int dev;

	seq_puts(m, DC395X_BANNER " PCI SCSI Host Adapter\n"
		" Driver Version " DC395X_VERSION "\n");

	DC395x_LOCK_IO(acb->scsi_host, flags);

	/* adapter-level configuration */
	seq_printf(m, "SCSI Host Nr %i, ", host->host_no);
	seq_printf(m, "DC395U/UW/F DC315/U %s\n",
		(acb->config & HCC_WIDE_CARD) ? "Wide" : "");
	seq_printf(m, "io_port_base 0x%04lx, ", acb->io_port_base);
	seq_printf(m, "irq_level 0x%04x, ", acb->irq_level);
	seq_printf(m, " SelTimeout %ims\n", (1638 * acb->sel_timeout) / 1000);

	seq_printf(m, "MaxID %i, MaxLUN %llu, ", host->max_id, host->max_lun);
	seq_printf(m, "AdapterID %i\n", host->this_id);

	seq_printf(m, "tag_max_num %i", acb->tag_max_num);

	seq_printf(m, ", FilterCfg 0x%02x",
		DC395x_read8(acb, TRM_S1040_SCSI_CONFIG1));
	seq_printf(m, ", DelayReset %is\n", acb->eeprom.delay_time);

	/* map of attached LUNs, 16 bytes as two rows of 8 */
	seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list));
	seq_printf(m, "Map of attached LUNs: %8ph\n", &acb->dcb_map[0]);
	seq_printf(m, "                      %8ph\n", &acb->dcb_map[8]);

	seq_puts(m,
		 "Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");

	/* one row per attached device */
	dev = 0;
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		int nego_period;
		seq_printf(m, "%02i %02i  %02i ", dev, dcb->target_id,
			dcb->target_lun);
		YESNO(dcb->dev_mode & NTC_DO_PARITY_CHK);
		YESNO(dcb->sync_offset);
		YESNO(dcb->sync_period & WIDE_SYNC);
		YESNO(dcb->dev_mode & NTC_DO_DISCONNECT);
		YESNO(dcb->dev_mode & NTC_DO_SEND_START);
		YESNO(dcb->sync_mode & EN_TAG_QUEUEING);
		/* period in ns: table value is in units of 4ns */
		nego_period = clock_period[dcb->sync_period & 0x07] << 2;
		if (dcb->sync_offset)
			seq_printf(m, "  %03i ns ", nego_period);
		else
			seq_printf(m, " (%03i ns)", (dcb->min_nego_period << 2));

		if (dcb->sync_offset & 0x0f) {
			/* derive MHz (integer and first decimal) from the period */
			spd = 1000 / (nego_period);
			spd1 = 1000 % (nego_period);
			spd1 = (spd1 * 10 + nego_period / 2) / (nego_period);
			seq_printf(m, "   %2i.%1i M     %02i ", spd, spd1,
				(dcb->sync_offset & 0x0f));
		} else
			seq_puts(m, "                 ");

		/* per device max commands */
		seq_printf(m, "   %02i\n", dcb->max_command);
		dev++;
	}

	if (timer_pending(&acb->waiting_timer))
		seq_puts(m, "Waiting queue timer running\n");
	else
		seq_putc(m, '\n');

	/* dump the waiting and going queues of every device */
	list_for_each_entry(dcb, &acb->dcb_list, list) {
		struct ScsiReqBlk *srb;
		if (!list_empty(&dcb->srb_waiting_list))
			seq_printf(m, "DCB (%02i-%i): Waiting: %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_waiting_list));
		list_for_each_entry(srb, &dcb->srb_waiting_list, list)
			seq_printf(m, " %p", srb->cmd);
		if (!list_empty(&dcb->srb_going_list))
			seq_printf(m, "\nDCB (%02i-%i): Going  : %i:",
				dcb->target_id, dcb->target_lun,
				list_size(&dcb->srb_going_list));
		list_for_each_entry(srb, &dcb->srb_going_list, list)
			seq_printf(m, " %p", srb->cmd);
		if (!list_empty(&dcb->srb_waiting_list) || !list_empty(&dcb->srb_going_list))
			seq_putc(m, '\n');
	}

	if (debug_enabled(DBG_1)) {
		seq_printf(m, "DCB list for ACB %p:\n", acb);
		list_for_each_entry(dcb, &acb->dcb_list, list) {
			seq_printf(m, "%p -> ", dcb);
		}
		seq_puts(m, "END\n");
	}

	DC395x_UNLOCK_IO(acb->scsi_host, flags);
	return 0;
}
4538
4539
/* SCSI mid-layer host template: entry points and queueing limits
 * for all dc395x adapters */
static struct scsi_host_template dc395x_driver_template = {
	.module                 = THIS_MODULE,
	.proc_name              = DC395X_NAME,
	.show_info              = dc395x_show_info,
	.name                   = DC395X_BANNER " " DC395X_VERSION,
	.queuecommand           = dc395x_queue_command,
	.slave_alloc            = dc395x_slave_alloc,
	.slave_destroy          = dc395x_slave_destroy,
	.can_queue              = DC395x_MAX_CAN_QUEUE,
	.this_id                = 7,
	.sg_tablesize           = DC395x_MAX_SG_TABLESIZE,
	.cmd_per_lun            = DC395x_MAX_CMD_PER_LUN,
	.eh_abort_handler       = dc395x_eh_abort,
	.eh_bus_reset_handler   = dc395x_eh_bus_reset,
	.dma_boundary           = PAGE_SIZE - 1,
};
4556
4557
4558
4559
4560
4561
4562static void banner_display(void)
4563{
4564 static int banner_done = 0;
4565 if (!banner_done)
4566 {
4567 dprintkl(KERN_INFO, "%s %s\n", DC395X_BANNER, DC395X_VERSION);
4568 banner_done = 1;
4569 }
4570}
4571
4572
4573
4574
4575
4576
4577
4578
4579
4580
4581
4582
4583
4584
4585
/*
 * dc395x_init_one - PCI probe callback
 * @dev: PCI device being probed
 * @id:  matching entry from dc395x_pci_table
 *
 * Enables the PCI device, allocates a Scsi_Host with an AdapterCtlBlk
 * as hostdata, initialises the adapter hardware/software state and
 * registers the host with the SCSI mid-layer.
 *
 * Returns 0 on success, -ENODEV on any failure (all partially acquired
 * resources are released on the error path).
 */
static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct Scsi_Host *scsi_host = NULL;
	struct AdapterCtlBlk *acb = NULL;
	unsigned long io_port_base;
	unsigned int io_port_len;
	unsigned int irq;

	dprintkdbg(DBG_0, "Init one instance (%s)\n", pci_name(dev));
	banner_display();

	if (pci_enable_device(dev))
	{
		dprintkl(KERN_INFO, "PCI Enable device failed.\n");
		return -ENODEV;
	}
	io_port_base = pci_resource_start(dev, 0) & PCI_BASE_ADDRESS_IO_MASK;
	io_port_len = pci_resource_len(dev, 0);
	irq = dev->irq;
	dprintkdbg(DBG_0, "IO_PORT=0x%04lx, IRQ=0x%x\n", io_port_base, dev->irq);

	/* allocate scsi host information (includes our adapter state) */
	scsi_host = scsi_host_alloc(&dc395x_driver_template,
				    sizeof(struct AdapterCtlBlk));
	if (!scsi_host) {
		dprintkl(KERN_INFO, "scsi_host_alloc failed\n");
		goto fail;
	}
	acb = (struct AdapterCtlBlk*)scsi_host->hostdata;
	acb->scsi_host = scsi_host;
	acb->dev = dev;

	/* initialise the adapter and everything we need */
	if (adapter_init(acb, io_port_base, io_port_len, irq)) {
		dprintkl(KERN_INFO, "adapter init failed\n");
		goto fail;
	}

	pci_set_master(dev);

	/* get the scsi mid level to scan for new devices on the bus */
	if (scsi_add_host(scsi_host, &dev->dev)) {
		dprintkl(KERN_ERR, "scsi_add_host failed\n");
		goto fail;
	}
	pci_set_drvdata(dev, scsi_host);
	scsi_scan_host(scsi_host);

	return 0;

fail:
	/* NOTE(review): adapter_uninit() is also reached when
	 * adapter_init() failed part-way — it relies on the acb resource
	 * fields accurately reflecting what is still held */
	if (acb != NULL)
		adapter_uninit(acb);
	if (scsi_host != NULL)
		scsi_host_put(scsi_host);
	pci_disable_device(dev);
	return -ENODEV;
}
4644
4645
4646
4647
4648
4649
4650
4651
/*
 * dc395x_remove_one - PCI remove callback
 * @dev: PCI device being removed
 *
 * Unregisters the host from the SCSI mid-layer, tears down the adapter
 * (timers, chip, IRQ, I/O region, SG tables), disables the PCI device
 * and drops the final host reference.
 */
static void dc395x_remove_one(struct pci_dev *dev)
{
	struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
	struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata);

	dprintkdbg(DBG_0, "dc395x_remove_one: acb=%p\n", acb);

	scsi_remove_host(scsi_host);
	adapter_uninit(acb);
	pci_disable_device(dev);
	scsi_host_put(scsi_host);
}
4664
4665
4666static struct pci_device_id dc395x_pci_table[] = {
4667 {
4668 .vendor = PCI_VENDOR_ID_TEKRAM,
4669 .device = PCI_DEVICE_ID_TEKRAM_TRMS1040,
4670 .subvendor = PCI_ANY_ID,
4671 .subdevice = PCI_ANY_ID,
4672 },
4673 {}
4674};
4675MODULE_DEVICE_TABLE(pci, dc395x_pci_table);
4676
4677
/* PCI driver glue: probe/remove entry points registered at module load
 * via module_pci_driver() */
static struct pci_driver dc395x_driver = {
	.name           = DC395X_NAME,
	.id_table       = dc395x_pci_table,
	.probe          = dc395x_init_one,
	.remove         = dc395x_remove_one,
};
module_pci_driver(dc395x_driver);

MODULE_AUTHOR("C.L. Huang / Erich Chen / Kurt Garloff");
MODULE_DESCRIPTION("SCSI host adapter driver for Tekram TRM-S1040 based adapters: Tekram DC395 and DC315 series");
MODULE_LICENSE("GPL");
4689