// SPDX-License-Identifier: GPL-2.0-only
/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/irqreturn.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"2.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

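/* SCSI bus reset settle time in seconds. */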
static int esp_bus_reset_settle = 3;

static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400
#define ESP_DEBUG_EVENT		0x00000800
#define ESP_DEBUG_COMMAND	0x00001000

#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_event(f, a...) \
do {	if (esp_debug & ESP_DEBUG_EVENT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_command(f, a...) \
do {	if (esp_debug & ESP_DEBUG_COMMAND) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)

static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}

void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_log_command("cmd[%02x]\n", val);
	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);

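/* Send command bytes from esp->command_block to the chip, either by
 * hand-loading the FIFO (ESP_FLAG_USE_FIFO) or by kicking off a DMA
 * transfer of the command block.
 */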
static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
	if (esp->flags & ESP_FLAG_USE_FIFO) {
		int i;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		for (i = 0; i < len; i++)
			esp_write8(esp->command_block[i], ESP_FDATA);
		scsi_esp_cmd(esp, cmd);
	} else {
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		cmd |= ESP_CMD_DMA;
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       len, max_len, 0, cmd);
	}
}

static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}

static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		shost_printk(KERN_INFO, esp->host,
			     "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
			     "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
			     idx,
			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
			     p->val, p->sreg, p->seqreg,
			     p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}

static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				shost_printk(KERN_ALERT, esp->host,
					     "ESP_FF_BYTES will not clear!\n");
				break;
			}
			udelay(1);
		}
	}
}

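/* The FASHME FIFO is 16 bits wide, so bytes come out in pairs;
 * ESP_STAT2_F1BYTE flags a dangling odd byte, which is pushed out
 * with a dummy write before being read.
 */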
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}

static void esp_set_all_config3(struct esp *esp, u8 val)
{
	int i;

	for (i = 0; i < ESP_MAX_TARGET; i++)
		esp->target[i].esp_config3 = val;
}

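/* Reset the chip (not the SCSI bus), probe the exact chip revision,
 * then reload the configuration, clock, sync offset/period and
 * select timeout registers.
 */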
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME;
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		u8 config4 = ESP_CONFIG4_GE1;
		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		if (config4 & ESP_CONFIG4_GE1) {
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
	esp->max_period = (esp->max_period + 3) >> 2;
	esp->min_period = (esp->min_period + 3) >> 2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
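		/* fallthrough */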
	case FAS236:
	case PCSCSI:
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	esp_read8(ESP_INTRPT);
	udelay(100);
}

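/* Set up the scatterlist for a command.  With ESP_FLAG_NO_DMA_MAP the
 * "DMA" addresses are simply kernel virtual addresses (PIO-style
 * hosts); otherwise the list is mapped via scsi_dma_map().
 */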
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int total = 0, i;

	if (cmd->sc_data_direction == DMA_NONE)
		return;

	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		spriv->num_sg = scsi_sg_count(cmd);
		for (i = 0; i < spriv->num_sg; i++) {
			sg[i].dma_address = (uintptr_t)sg_virt(&sg[i]);
			total += sg_dma_len(&sg[i]);
		}
	} else {
		spriv->num_sg = scsi_dma_map(cmd);
		for (i = 0; i < spriv->num_sg; i++)
			total += sg_dma_len(&sg[i]);
	}
	spriv->cur_residue = sg_dma_len(sg);
	spriv->cur_sg = sg;
	spriv->tot_residue = total;
}

static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
				   struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return ent->sense_dma +
			(ent->sense_ptr - cmd->sense_buffer);
	}

	return sg_dma_address(p->cur_sg) +
		(sg_dma_len(p->cur_sg) -
		 p->cur_residue);
}

static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
				    struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return SCSI_SENSE_BUFFERSIZE -
			(ent->sense_ptr - cmd->sense_buffer);
	}
	return p->cur_residue;
}

static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		shost_printk(KERN_ERR, esp->host,
			     "Data transfer overflow.\n");
		shost_printk(KERN_ERR, esp->host,
			     "cur_residue[%d] tot_residue[%d] len[%u]\n",
			     p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	if (!p->cur_residue && p->tot_residue) {
		p->cur_sg++;
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}

static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
		scsi_dma_unmap(cmd);
}

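/* Implement the SCSI SAVE DATA POINTER / RESTORE POINTERS semantics:
 * snapshot or reload the current scatterlist position and residues.
 */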
static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->saved_sense_ptr = ent->sense_ptr;
		return;
	}
	ent->saved_cur_residue = spriv->cur_residue;
	ent->saved_cur_sg = spriv->cur_sg;
	ent->saved_tot_residue = spriv->tot_residue;
}

static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr = ent->saved_sense_ptr;
		return;
	}
	spriv->cur_residue = ent->saved_cur_residue;
	spriv->cur_sg = ent->saved_cur_sg;
	spriv->tot_residue = ent->saved_tot_residue;
}

static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}

static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}

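/* Clip a DMA transfer to what the chip's transfer counter can handle:
 * 24 bits on FASHME, otherwise 64K chunks that also must not cross a
 * 16MB boundary in the low 24 address bits.
 */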
static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	if (esp->rev == FASHME) {
		if (dma_len > (1U << 24))
			dma_len = (1U << 24);
	} else {
		u32 base, end;

		if (dma_len > (1U << 16))
			dma_len = (1U << 16);

		base = dma_addr & ((1U << 24) - 1U);
		end = base + dma_len;
		if (end > (1U << 24))
			end = (1U << 24);
		dma_len = end - base;
	}
	return dma_len;
}

static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	return spi_width(target) != tp->nego_goal_width;
}

static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	if (!spi_offset(target) && !tp->nego_goal_offset)
		return 0;

	if (spi_offset(target) == tp->nego_goal_offset &&
	    spi_period(target) == tp->nego_goal_period)
		return 0;

	return 1;
}

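/* Try to reserve the LUN for this command.  An untagged command must
 * wait until all outstanding tagged commands drain (lp->hold), and a
 * tagged command cannot be issued while an untagged one is in flight.
 */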
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			if (lp->num_tagged)
				return -EBUSY;

			lp->hold = 0;
		} else if (lp->num_tagged) {
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	}

	if (lp->non_tagged_cmd || lp->hold)
		return -EBUSY;

	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}

static void esp_free_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (ent->orig_tag[0]) {
		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
		lp->num_tagged--;
	} else {
		BUG_ON(lp->non_tagged_cmd != ent);
		lp->non_tagged_cmd = NULL;
	}
}

static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	ent->sense_ptr = ent->cmd->sense_buffer;
	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		ent->sense_dma = (uintptr_t)ent->sense_ptr;
		return;
	}

	ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
					SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}

static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
		dma_unmap_single(esp->dev, ent->sense_dma,
				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
	ent->sense_ptr = NULL;
}

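/* When a command completes with CHECK CONDITION we force-feed a
 * REQUEST SENSE to the device ourselves to fetch the sense data,
 * instead of leaving it to the midlayer's error handling.
 */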
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);
		esp_map_sense(esp, ent);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}

static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}

static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	bool select_and_stop = false;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
		select_and_stop = true;

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		if (esp->msg_out_len)
			select_and_stop = true;
	}

build_identify:
	*p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		select_and_stop = true;
	}

	if (select_and_stop) {
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	} else {
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}

static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
	struct list_head *head = &esp->esp_cmd_pool;
	struct esp_cmd_entry *ret;

	if (list_empty(head)) {
		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
	} else {
		ret = list_entry(head->next, struct esp_cmd_entry, list);
		list_del(&ret->list);
		memset(ret, 0, sizeof(*ret));
	}
	return ret;
}

static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}

static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_unmap_sense(esp, ent);

		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}

static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	return (status | (message << 8) | (driver_code << 16));
}

static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}

static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	spriv = ESP_CMD_PRIV(cmd);
	spriv->num_sg = 0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}

static DEF_SCSI_QCMD(esp_queuecommand)

static int esp_check_gross_error(struct esp *esp)
{
	if (esp->sreg & ESP_STAT_SPAM) {
		shost_printk(KERN_ERR, esp->host,
			     "Gross error sreg[%02x]\n", esp->sreg);

		return 1;
	}
	return 0;
}

static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			return -1;
		}
		break;
	}

	return 0;
}

static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("esp_schedule_reset() from %pf\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}

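/* During reselection by a disconnected tagged command, read the tag
 * message bytes by hand from the reconnecting target so we can find
 * the matching esp_cmd_entry.
 */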
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}

static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);

			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}

static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;

	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp_unmap_sense(esp, ent);
		}

		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected selection completion ireg[%x]\n", esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}

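/* Work out how many bytes actually moved during a data phase from the
 * transfer counter, the FIFO residue, and any partial byte left over
 * by the DMA engine.
 */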
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
		if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
			ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;
	bytes_sent -= esp->send_cmd_residual;

	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}

static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}

static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
	esp_schedule_reset(esp);
}

static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_extended(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt = cmd->device->id;

	tp = &esp->target[tgt];
	if (esp->msg_in[2] == EXTENDED_SDTR) {
		esp_msgin_sdtr(esp, tp);
		return;
	}
	if (esp->msg_in[2] == EXTENDED_WDTR) {
		esp_msgin_wdtr(esp, tp);
		return;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected extended msg type %x\n", esp->msg_in[2]);

	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

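/* Process the message-in buffer.  Returns non-zero if more message
 * bytes are still needed, zero once the message has been consumed.
 */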
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}

static int esp_process_event(struct esp *esp)
{
	int write, i;

again:
	write = 0;
	esp_log_event("process event %d phase %x\n",
		      esp->event, esp->sreg & ESP_STAT_PMASK);
	switch (esp->event) {
	case ESP_EVENT_CHECK_PHASE:
		switch (esp->sreg & ESP_STAT_PMASK) {
		case ESP_DOP:
			esp_event(esp, ESP_EVENT_DATA_OUT);
			break;
		case ESP_DIP:
			esp_event(esp, ESP_EVENT_DATA_IN);
			break;
		case ESP_STATP:
			esp_flush_fifo(esp);
			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
			esp_event(esp, ESP_EVENT_STATUS);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;

		case ESP_MOP:
			esp_event(esp, ESP_EVENT_MSGOUT);
			break;

		case ESP_MIP:
			esp_event(esp, ESP_EVENT_MSGIN);
			break;

		case ESP_CMDP:
			esp_event(esp, ESP_EVENT_CMD_START);
			break;

		default:
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected phase, sreg=%02x\n",
				     esp->sreg);
			esp_schedule_reset(esp);
			return 0;
		}
		goto again;

	case ESP_EVENT_DATA_IN:
		write = 1;
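		/* fallthrough */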
	case ESP_EVENT_DATA_OUT: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
		unsigned int dma_len = esp_cur_dma_len(ent, cmd);

		if (esp->rev == ESP100)
			scsi_esp_cmd(esp, ESP_CMD_NULL);

		if (write)
			ent->flags |= ESP_CMD_FLAG_WRITE;
		else
			ent->flags &= ~ESP_CMD_FLAG_WRITE;

		if (esp->ops->dma_length_limit)
			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
							     dma_len);
		else
			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);

		esp->data_dma_len = dma_len;

		if (!dma_len) {
			shost_printk(KERN_ERR, esp->host,
				     "DMA length is zero!\n");
			shost_printk(KERN_ERR, esp->host,
				     "cur adr[%08llx] len[%08x]\n",
				     (unsigned long long)esp_cur_dma_addr(ent, cmd),
				     esp_cur_dma_len(ent, cmd));
			esp_schedule_reset(esp);
			return 0;
		}

		esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
				  (unsigned long long)dma_addr, dma_len, write);

		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
				       write, ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_DATA_DONE);
		break;
	}
	case ESP_EVENT_DATA_DONE: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		int bytes_sent;

		if (esp->ops->dma_error(esp)) {
			shost_printk(KERN_INFO, esp->host,
				     "data done, DMA error, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		if (ent->flags & ESP_CMD_FLAG_WRITE) {
			esp->ops->dma_drain(esp);
		}
		esp->ops->dma_invalidate(esp);

		if (esp->ireg != ESP_INTR_BSERV) {
			shost_printk(KERN_INFO, esp->host,
				     "data done, not BSERV, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

		esp_log_datadone("data done flgs[%x] sent[%d]\n",
				 ent->flags, bytes_sent);

		if (bytes_sent < 0) {
			esp_schedule_reset(esp);
			return 0;
		}

		esp_advance_dma(esp, ent, cmd, bytes_sent);
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	}

	case ESP_EVENT_STATUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		if (esp->ireg & ESP_INTR_FDONE) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = esp_read8(ESP_FDATA);
			scsi_esp_cmd(esp, ESP_CMD_MOK);
		} else if (esp->ireg == ESP_INTR_BSERV) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = 0xff;
			esp_event(esp, ESP_EVENT_MSGIN);
			return 0;
		}

		if (ent->message != COMMAND_COMPLETE) {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in status\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}

		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_FREE_BUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;

		if (ent->message == COMMAND_COMPLETE ||
		    ent->message == DISCONNECT)
			scsi_esp_cmd(esp, ESP_CMD_ESEL);

		if (ent->message == COMMAND_COMPLETE) {
			esp_log_cmddone("Command done status[%x] message[%x]\n",
					ent->status, ent->message);
			if (ent->status == SAM_STAT_TASK_SET_FULL)
				esp_event_queue_full(esp, ent);

			if (ent->status == SAM_STAT_CHECK_CONDITION &&
			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
				esp_autosense(esp, ent);
			} else {
				esp_cmd_is_done(esp, ent, cmd,
						compose_result(ent->status,
							       ent->message,
							       DID_OK));
			}
		} else if (ent->message == DISCONNECT) {
			esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
					   cmd->device->id,
					   ent->tag[0], ent->tag[1]);

			esp->active_cmd = NULL;
			esp_maybe_execute_command(esp);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in freebus\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}
		if (esp->active_cmd)
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_MSGOUT: {
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_debug & ESP_DEBUG_MSGOUT) {
			int i;
			printk("ESP: Sending message [ ");
			for (i = 0; i < esp->msg_out_len; i++)
				printk("%02x ", esp->msg_out[i]);
			printk("]\n");
		}

		if (esp->rev == FASHME) {
			int i;

			for (i = 0; i < esp->msg_out_len; i++) {
				esp_write8(esp->msg_out[i], ESP_FDATA);
				esp_write8(0, ESP_FDATA);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
		} else {
			if (esp->msg_out_len == 1) {
				esp_write8(esp->msg_out[0], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else if (esp->flags & ESP_FLAG_USE_FIFO) {
				for (i = 0; i < esp->msg_out_len; i++)
					esp_write8(esp->msg_out[i], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else {
				memcpy(esp->command_block,
				       esp->msg_out,
				       esp->msg_out_len);

				esp->ops->send_dma_cmd(esp,
						       esp->command_block_dma,
						       esp->msg_out_len,
						       esp->msg_out_len,
						       0,
						       ESP_CMD_DMA|ESP_CMD_TI);
			}
		}
		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
		break;
	}
	case ESP_EVENT_MSGOUT_DONE:
		if (esp->rev == FASHME) {
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		} else {
			if (esp->msg_out_len > 1)
				esp->ops->dma_invalidate(esp);

			if (!(esp->ireg & ESP_INTR_DC))
				scsi_esp_cmd(esp, ESP_CMD_NULL);
		}

		esp->msg_out_len = 0;

		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	case ESP_EVENT_MSGIN:
		if (esp->ireg & ESP_INTR_BSERV) {
			if (esp->rev == FASHME) {
				if (!(esp_read8(ESP_STATUS2) &
				      ESP_STAT2_FEMPTY))
					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
			} else {
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
				if (esp->rev == ESP100)
					scsi_esp_cmd(esp, ESP_CMD_NULL);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;
		}
		if (esp->ireg & ESP_INTR_FDONE) {
			u8 val;

			if (esp->rev == FASHME)
				val = esp->fifo[0];
			else
				val = esp_read8(ESP_FDATA);
			esp->msg_in[esp->msg_in_len++] = val;

			esp_log_msgin("Got msgin byte %x\n", val);

			if (!esp_msgin_process(esp))
				esp->msg_in_len = 0;

			if (esp->rev == FASHME)
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);

			scsi_esp_cmd(esp, ESP_CMD_MOK);

			if (esp->event == ESP_EVENT_RESET)
				return 0;

			if (esp->event != ESP_EVENT_FREE_BUS)
				esp_event(esp, ESP_EVENT_CHECK_PHASE);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "MSGIN neither BSERV nor FDONE, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}
		break;
	case ESP_EVENT_CMD_START:
		memcpy(esp->command_block, esp->cmd_bytes_ptr,
		       esp->cmd_bytes_left);
		esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_CMD_DONE);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	case ESP_EVENT_CMD_DONE:
		esp->ops->dma_invalidate(esp);
		if (esp->ireg & ESP_INTR_BSERV) {
			esp_event(esp, ESP_EVENT_CHECK_PHASE);
			goto again;
		}
		esp_schedule_reset(esp);
		return 0;

	case ESP_EVENT_RESET:
		scsi_esp_cmd(esp, ESP_CMD_RS);
		break;

	default:
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected event %x, resetting\n", esp->event);
		esp_schedule_reset(esp);
		return 0;
	}
	return 1;
}

static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
		esp_unmap_sense(esp, ent);

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}

static void esp_clear_hold(struct scsi_device *dev, void *data)
{
	struct esp_lun_data *lp = dev->hostdata;

	BUG_ON(lp->num_tagged);
	lp->hold = 0;
}

static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}

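/* Main interrupt engine: latch the status, sequence-step and interrupt
 * registers, handle reset completion and spurious interrupts, then run
 * the event state machine until it has consumed the interrupt.
 */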
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	esp->sreg = esp_read8(ESP_STATUS);
	esp->seqreg = esp_read8(ESP_SSTEP);
	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		shost_printk(KERN_INFO, esp->host,
			     "unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (esp->ireg & ESP_INTR_RSEL) {
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		} else {
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}

irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);

static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	if (esp->config2 == 0) {
		esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
		esp_write8(esp->config2, ESP_CFG2);

		val = esp_read8(ESP_CFG2);
		val &= ~ESP_CONFIG2_MAGIC;

		esp->config2 = 0;
		if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
			esp->rev = ESP100;
			return;
		}
	}

	esp_set_all_config3(esp, 5);
	esp->prev_cfg3 = 5;
	esp_write8(esp->config2, ESP_CFG2);
	esp_write8(0, ESP_CFG3);
	esp_write8(esp->prev_cfg3, ESP_CFG3);

	val = esp_read8(ESP_CFG3);
	if (val != 5) {
		esp->rev = ESP100A;
	} else {
		esp_set_all_config3(esp, 0);
		esp->prev_cfg3 = 0;
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
			esp->rev = FAST;
			esp->sync_defp = SYNC_DEFP_FAST;
		} else {
			esp->rev = ESP236;
		}
	}
}

static void esp_init_swstate(struct esp *esp)
{
	int i;

	INIT_LIST_HEAD(&esp->queued_cmds);
	INIT_LIST_HEAD(&esp->active_cmds);
	INIT_LIST_HEAD(&esp->esp_cmd_pool);

	for (i = 0; i < ESP_MAX_TARGET; i++) {
		esp->target[i].flags = 0;
		esp->target[i].nego_goal_period = 0;
		esp->target[i].nego_goal_offset = 0;
		esp->target[i].nego_goal_width = 0;
		esp->target[i].nego_goal_tags = 0;
	}
}

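/* Perform the one-time bringup reset: reset the DMA engine and the
 * chip, then pulse a SCSI bus reset with reset-interrupt reporting
 * disabled so we do not trip our own handler.
 */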
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	esp->ops->reset_dma(esp);

	esp_reset_esp(esp);

	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	esp_write8(esp->config1, ESP_CFG1);

	esp_read8(ESP_INTRPT);
}

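/* Derive the clock conversion factor (CCF) and related timing values
 * from the input clock.  CCF is roughly clock-MHz / 5, rounded up,
 * with 1 mapped to 2; implausible clock settings fall back to a safe
 * 20MHz / CCF=4 configuration.
 */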
static void esp_set_clock_params(struct esp *esp)
{
	int fhz;
	u8 ccf;

	fhz = esp->cfreq;

	ccf = ((fhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
		fhz = 20000000;
		ccf = 4;
	}

	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fhz;
	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}

static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
	"AM53C974",
};

static struct scsi_transport_template *esp_transport_template;

int scsi_esp_register(struct esp *esp)
{
	static int instance;
	int err;

	if (!esp->num_tags)
		esp->num_tags = ESP_DEFAULT_TAGS;
	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
		   esp->host->unique_id, esp->regs, esp->dma_regs,
		   esp->host->irq);
	dev_printk(KERN_INFO, esp->dev,
		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
		   esp->host->unique_id, esp_chip_names[esp->rev],
		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, esp->dev);
	if (err)
		return err;

	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);

void scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);

2426static int esp_target_alloc(struct scsi_target *starget)
2427{
2428 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2429 struct esp_target_data *tp = &esp->target[starget->id];
2430
2431 tp->starget = starget;
2432
2433 return 0;
2434}
2435
2436static void esp_target_destroy(struct scsi_target *starget)
2437{
2438 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2439 struct esp_target_data *tp = &esp->target[starget->id];
2440
2441 tp->starget = NULL;
2442}
2443
2444static int esp_slave_alloc(struct scsi_device *dev)
2445{
2446 struct esp *esp = shost_priv(dev->host);
2447 struct esp_target_data *tp = &esp->target[dev->id];
2448 struct esp_lun_data *lp;
2449
2450 lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2451 if (!lp)
2452 return -ENOMEM;
2453 dev->hostdata = lp;
2454
2455 spi_min_period(tp->starget) = esp->min_period;
2456 spi_max_offset(tp->starget) = 15;
2457
2458 if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2459 spi_max_width(tp->starget) = 1;
2460 else
2461 spi_max_width(tp->starget) = 0;
2462
2463 return 0;
2464}

static int esp_slave_configure(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];

	if (dev->tagged_supported)
		scsi_change_queue_depth(dev, esp->num_tags);

	tp->flags |= ESP_TGT_DISCONNECT;

	if (!spi_initial_dv(dev->sdev_target))
		spi_dv_device(dev);

	return 0;
}

static void esp_slave_destroy(struct scsi_device *dev)
{
	struct esp_lun_data *lp = dev->hostdata;

	kfree(lp);
	dev->hostdata = NULL;
}

static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* Dump the command queues.  This is verbose, but invaluable
	 * when trying to work out why an abort happened.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
		     cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		shost_printk(KERN_ERR, esp->host,
			     "Current command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus.  If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected from the bus, which
		 * makes it very hard to abort.  For now we fail and
		 * let the SCSI error handling layer escalate to a
		 * bus reset or host reset.
		 *
		 * One possible improvement would be to reselect the
		 * target and send it an ABORT TASK SET message
		 * directly, but cooking up a message-only "command"
		 * needs infrastructure we do not have yet.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}

static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;

	/* XXX This is too simple...  We should add lots of
	 * XXX checks here: are there commands queued, is
	 * XXX there activity on the bus, is there a connected
	 * XXX target, etc. etc.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}

/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}

static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}

struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= 0xffff,
	.skip_settle_delay	= 1,
};
EXPORT_SYMBOL(scsi_esp_template);

static void esp_get_signalling(struct Scsi_Host *host)
{
	struct esp *esp = shost_priv(host);
	enum spi_signal_type type;

	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
		type = SPI_SIGNAL_HVD;
	else
		type = SPI_SIGNAL_SE;

	spi_signalling(host) = type;
}

static void esp_set_offset(struct scsi_target *target, int offset)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
		tp->nego_goal_offset = 0;
	else
		tp->nego_goal_offset = offset;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_period(struct scsi_target *target, int period)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_period = period;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_width(struct scsi_target *target, int width)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_width = (width ? 1 : 0);
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};

static int __init esp_init(void)
{
	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
		     sizeof(struct esp_cmd_priv));

	esp_transport_template = spi_attach_transport(&esp_transport_ops);
	if (!esp_transport_template)
		return -ENODEV;

	return 0;
}

static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}

MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
		 "ESP scsi bus reset delay in seconds");

module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
"	0x00000001	Log interrupt events\n"
"	0x00000002	Log scsi commands\n"
"	0x00000004	Log resets\n"
"	0x00000008	Log message in events\n"
"	0x00000010	Log message out events\n"
"	0x00000020	Log command completion\n"
"	0x00000040	Log disconnects\n"
"	0x00000080	Log data start\n"
"	0x00000100	Log data done\n"
"	0x00000200	Log reconnects\n"
"	0x00000400	Log auto-sense data\n"
"	0x00000800	Log driver events\n"
"	0x00001000	Log ESP commands\n"
);

module_init(esp_init);
module_exit(esp_exit);

#ifdef CONFIG_SCSI_ESP_PIO
static inline unsigned int esp_wait_for_fifo(struct esp *esp)
{
	int i = 500000;

	/* Poll for up to ~500ms for data to show up in the FIFO. */
	do {
		unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

		if (fbytes)
			return fbytes;

		udelay(1);
	} while (--i);

	shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
		     esp_read8(ESP_STATUS));
	return 0;
}

static inline int esp_wait_for_intr(struct esp *esp)
{
	int i = 500000;

	/* Poll for up to ~500ms for the chip to raise an interrupt. */
	do {
		esp->sreg = esp_read8(ESP_STATUS);
		if (esp->sreg & ESP_STAT_INTR)
			return 0;

		udelay(1);
	} while (--i);

	shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
		     esp->sreg);
	return 1;
}

#define ESP_FIFO_SIZE 16

void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
		      u32 dma_count, int write, u8 cmd)
{
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	cmd &= ~ESP_CMD_DMA;
	esp->send_cmd_error = 0;

	if (write) {
		/* Data in: drain bytes from the FIFO into memory as the
		 * chip receives them from the bus.
		 */
		u8 *dst = (u8 *)addr;
		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);

		scsi_esp_cmd(esp, cmd);

		while (1) {
			if (!esp_wait_for_fifo(esp))
				break;

			*dst++ = readb(esp->fifo_reg);
			--esp_count;

			if (!esp_count)
				break;

			if (esp_wait_for_intr(esp)) {
				esp->send_cmd_error = 1;
				break;
			}

			if ((esp->sreg & ESP_STAT_PMASK) != phase)
				break;

			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & mask) {
				esp->send_cmd_error = 1;
				break;
			}

			if (phase == ESP_MIP)
				esp_write8(ESP_CMD_MOK, ESP_CMD);

			esp_write8(ESP_CMD_TI, ESP_CMD);
		}
	} else {
		/* Data out: prime the FIFO, then keep topping it up
		 * until the requested byte count has been pushed out.
		 */
		unsigned int n = ESP_FIFO_SIZE;
		u8 *src = (u8 *)addr;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (n > esp_count)
			n = esp_count;
		writesb(esp->fifo_reg, src, n);
		src += n;
		esp_count -= n;

		scsi_esp_cmd(esp, cmd);

		while (esp_count) {
			if (esp_wait_for_intr(esp)) {
				esp->send_cmd_error = 1;
				break;
			}

			if ((esp->sreg & ESP_STAT_PMASK) != phase)
				break;

			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ~ESP_INTR_BSERV) {
				esp->send_cmd_error = 1;
				break;
			}

			n = ESP_FIFO_SIZE -
			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);

			if (n > esp_count)
				n = esp_count;
			writesb(esp->fifo_reg, src, n);
			src += n;
			esp_count -= n;

			esp_write8(ESP_CMD_TI, ESP_CMD);
		}
	}

	/* Whatever we did not manage to transfer is the residual. */
	esp->send_cmd_residual = esp_count;
}
EXPORT_SYMBOL(esp_send_pio_cmd);
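
/* Sketch of how a PIO-only bus front-end might use this helper
 * (illustrative; the fifo_reg setup and the .send_dma_cmd wiring shown
 * are assumptions modeled on existing glue drivers, not requirements of
 * this file):
 *
 *	esp->fifo_reg = esp->regs + ESP_FDATA * reg_spacing;
 *
 *	static const struct esp_driver_ops my_pio_ops = {
 *		.send_dma_cmd	= esp_send_pio_cmd,
 *		...
 *	};
 *
 * After a transfer, esp->send_cmd_error flags a failure and
 * esp->send_cmd_residual holds the number of untransferred bytes.
 */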
#endif