1
2
3
4
5
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/slab.h>
10#include <linux/delay.h>
11#include <linux/list.h>
12#include <linux/completion.h>
13#include <linux/kallsyms.h>
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16#include <linux/init.h>
17#include <linux/irqreturn.h>
18
19#include <asm/irq.h>
20#include <asm/io.h>
21#include <asm/dma.h>
22
23#include <scsi/scsi.h>
24#include <scsi/scsi_host.h>
25#include <scsi/scsi_cmnd.h>
26#include <scsi/scsi_device.h>
27#include <scsi/scsi_tcq.h>
28#include <scsi/scsi_dbg.h>
29#include <scsi/scsi_transport_spi.h>
30
31#include "esp_scsi.h"
32
#define DRV_MODULE_NAME "esp"
#define PFX DRV_MODULE_NAME ": "
#define DRV_VERSION "2.000"
#define DRV_MODULE_RELDATE "April 19, 2007"

/* Seconds to wait for devices to settle after a SCSI bus reset. */
static int esp_bus_reset_settle = 3;

/* Bitmask of ESP_DEBUG_* values; selects which esp_log_*() macros below
 * actually emit output. */
static u32 esp_debug;
#define ESP_DEBUG_INTR 0x00000001
#define ESP_DEBUG_SCSICMD 0x00000002
#define ESP_DEBUG_RESET 0x00000004
#define ESP_DEBUG_MSGIN 0x00000008
#define ESP_DEBUG_MSGOUT 0x00000010
#define ESP_DEBUG_CMDDONE 0x00000020
#define ESP_DEBUG_DISCONNECT 0x00000040
#define ESP_DEBUG_DATASTART 0x00000080
#define ESP_DEBUG_DATADONE 0x00000100
#define ESP_DEBUG_RECONNECT 0x00000200
#define ESP_DEBUG_AUTOSENSE 0x00000400
#define ESP_DEBUG_EVENT 0x00000800
#define ESP_DEBUG_COMMAND 0x00001000

/* Per-subsystem debug loggers: each prints via shost_printk() only when
 * its corresponding ESP_DEBUG_* bit is set in esp_debug.  All of them
 * expect a local variable named 'esp' to be in scope at the call site. */
#define esp_log_intr(f, a...) \
do { if (esp_debug & ESP_DEBUG_INTR) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do { if (esp_debug & ESP_DEBUG_RESET) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do { if (esp_debug & ESP_DEBUG_MSGIN) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do { if (esp_debug & ESP_DEBUG_MSGOUT) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do { if (esp_debug & ESP_DEBUG_CMDDONE) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do { if (esp_debug & ESP_DEBUG_DISCONNECT) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do { if (esp_debug & ESP_DEBUG_DATASTART) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do { if (esp_debug & ESP_DEBUG_DATADONE) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do { if (esp_debug & ESP_DEBUG_RECONNECT) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_event(f, a...) \
do { if (esp_debug & ESP_DEBUG_EVENT) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_command(f, a...) \
do { if (esp_debug & ESP_DEBUG_COMMAND) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

/* Register accessors indirect through the bus-specific ops vector, so the
 * same core works over PIO, SBUS, PCI, etc. */
#define esp_read8(REG) esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG)
118
119static void esp_log_fill_regs(struct esp *esp,
120 struct esp_event_ent *p)
121{
122 p->sreg = esp->sreg;
123 p->seqreg = esp->seqreg;
124 p->sreg2 = esp->sreg2;
125 p->ireg = esp->ireg;
126 p->select_state = esp->select_state;
127 p->event = esp->event;
128}
129
/*
 * Issue a command byte to the ESP chip's command register, recording it
 * in the circular event log first so that failures can be reconstructed
 * with esp_dump_cmd_log().
 */
void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	/* Circular buffer; ESP_EVENT_LOG_SZ must be a power of two. */
	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_log_command("cmd[%02x]\n", val);
	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);
146
/*
 * Transfer 'len' bytes of esp->command_block to the chip and then start
 * command 'cmd'.  Either feeds the FIFO by PIO (ESP_FLAG_USE_FIFO) or
 * hands the block to the bus-specific DMA engine.
 */
static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
	if (esp->flags & ESP_FLAG_USE_FIFO) {
		int i;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		for (i = 0; i < len; i++)
			esp_write8(esp->command_block[i], ESP_FDATA);
		scsi_esp_cmd(esp, cmd);
	} else {
		/* HME wants a FIFO flush before DMA selection. */
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		cmd |= ESP_CMD_DMA;
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       len, max_len, 0, cmd);
	}
}
164
165static void esp_event(struct esp *esp, u8 val)
166{
167 struct esp_event_ent *p;
168 int idx = esp->esp_event_cur;
169
170 p = &esp->esp_event_log[idx];
171 p->type = ESP_EVENT_TYPE_EVENT;
172 p->val = val;
173 esp_log_fill_regs(esp, p);
174
175 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
176
177 esp->event = val;
178}
179
180static void esp_dump_cmd_log(struct esp *esp)
181{
182 int idx = esp->esp_event_cur;
183 int stop = idx;
184
185 shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
186 do {
187 struct esp_event_ent *p = &esp->esp_event_log[idx];
188
189 shost_printk(KERN_INFO, esp->host,
190 "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
191 "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
192 idx,
193 p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
194 p->val, p->sreg, p->seqreg,
195 p->sreg2, p->ireg, p->select_state, p->event);
196
197 idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
198 } while (idx != stop);
199}
200
/*
 * Flush the chip FIFO.  On ESP236 the flush is not instantaneous, so poll
 * the FIFO-flags register (bounded by a 1ms busy-wait) until it empties.
 */
static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				shost_printk(KERN_ALERT, esp->host,
					     "ESP_FF_BYTES will not clear!\n");
				break;
			}
			udelay(1);
		}
	}
}
217
/*
 * Drain the HME FIFO into esp->fifo[].  The FIFO count is in 16-bit
 * words (two reads of ESP_FDATA per count); a trailing odd byte is
 * signalled by ESP_STAT2_F1BYTE and recovered by pushing a pad byte
 * first.  Leaves the byte count in esp->fifo_cnt.
 */
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		/* Push a dummy byte so the odd data byte can be read out. */
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}
234
235static void esp_set_all_config3(struct esp *esp, u8 val)
236{
237 int i;
238
239 for (i = 0; i < ESP_MAX_TARGET; i++)
240 esp->target[i].esp_config3 = val;
241}
242
243
/*
 * Full chip reset and reinitialization: reset the core and DMA logic,
 * identify the exact chip revision, derive sync-period limits from the
 * clock conversion factor, and program the configuration registers for
 * the detected revision.
 */
static void esp_reset_esp(struct esp *esp)
{
	/* Reset the chip, then its DMA logic via a NULL DMA command. */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* Period limits are in clock-cycle units here; converted to
	 * register units (>>2 with rounding) further below. */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		/* Narrow down "FAST" to the concrete part via the UID
		 * register's family code. */
		u8 family_code = ESP_FAMILY(esp_read8(ESP_UID));

		if (family_code == ESP_UID_F236) {
			esp->rev = FAS236;
		} else if (family_code == ESP_UID_HME) {
			esp->rev = FASHME;
		} else if (family_code == ESP_UID_FSC) {
			esp->rev = FSC;
			/* NOTE(review): presumably configures active
			 * negation on FSC parts -- confirm vs datasheet. */
			esp_write8(ESP_CONFIG4_RADE, ESP_CFG4);
		} else {
			esp->rev = FAS100A;
		}
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		/* Distinguish PCSCSI from FAS236: write GE1 to CFG4 and
		 * read it back; only PCSCSI parts latch the bit. */
		u8 config4 = ESP_CONFIG4_GE1;
		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		if (config4 & ESP_CONFIG4_GE1) {
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
	esp->max_period = (esp->max_period + 3)>>2;
	esp->min_period = (esp->min_period + 3)>>2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* ESP100 has no CFG2/CFG3 registers to program. */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		fallthrough;

	case FAS236:
	case PCSCSI:
	case FSC:
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Program clock conversion factor and reset the cached sync
	 * transfer period / offset shadows. */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Clear any latched interrupt, then give the chip time to settle. */
	esp_read8(ESP_INTRPT);
	udelay(100);
}
371
/*
 * Map the command's scatterlist for DMA and initialize the per-command
 * transfer cursor (cur_sg/cur_residue/tot_residue) in ESP_CMD_PRIV().
 * No-op for commands with no data phase.
 */
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int total = 0, i;
	struct scatterlist *s;

	if (cmd->sc_data_direction == DMA_NONE)
		return;

	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		/* PIO-style backends: no IOMMU mapping; stash the kernel
		 * virtual address in dma_address instead. */
		spriv->num_sg = scsi_sg_count(cmd);

		scsi_for_each_sg(cmd, s, spriv->num_sg, i) {
			s->dma_address = (uintptr_t)sg_virt(s);
			total += sg_dma_len(s);
		}
	} else {
		spriv->num_sg = scsi_dma_map(cmd);
		scsi_for_each_sg(cmd, s, spriv->num_sg, i)
			total += sg_dma_len(s);
	}
	spriv->cur_residue = sg_dma_len(sg);
	spriv->prv_sg = NULL;
	spriv->cur_sg = sg;
	spriv->tot_residue = total;
}
403
404static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
405 struct scsi_cmnd *cmd)
406{
407 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
408
409 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
410 return ent->sense_dma +
411 (ent->sense_ptr - cmd->sense_buffer);
412 }
413
414 return sg_dma_address(p->cur_sg) +
415 (sg_dma_len(p->cur_sg) -
416 p->cur_residue);
417}
418
419static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
420 struct scsi_cmnd *cmd)
421{
422 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
423
424 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
425 return SCSI_SENSE_BUFFERSIZE -
426 (ent->sense_ptr - cmd->sense_buffer);
427 }
428 return p->cur_residue;
429}
430
/*
 * Advance the transfer cursor by 'len' bytes after a (partial) data
 * transfer, stepping to the next scatterlist entry when the current one
 * is exhausted.  Clamps and complains if the chip reported more data
 * moved than was outstanding.
 */
static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		shost_printk(KERN_ERR, esp->host,
			     "Data transfer overflow.\n");
		shost_printk(KERN_ERR, esp->host,
			     "cur_residue[%d] tot_residue[%d] len[%u]\n",
			     p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	/* Current entry drained but more data pending: step the cursor. */
	if (!p->cur_residue && p->tot_residue) {
		p->prv_sg = p->cur_sg;
		p->cur_sg = sg_next(p->cur_sg);
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}
458
459static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
460{
461 if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
462 scsi_dma_unmap(cmd);
463}
464
465static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
466{
467 struct scsi_cmnd *cmd = ent->cmd;
468 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
469
470 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
471 ent->saved_sense_ptr = ent->sense_ptr;
472 return;
473 }
474 ent->saved_cur_residue = spriv->cur_residue;
475 ent->saved_prv_sg = spriv->prv_sg;
476 ent->saved_cur_sg = spriv->cur_sg;
477 ent->saved_tot_residue = spriv->tot_residue;
478}
479
480static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
481{
482 struct scsi_cmnd *cmd = ent->cmd;
483 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
484
485 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
486 ent->sense_ptr = ent->saved_sense_ptr;
487 return;
488 }
489 spriv->cur_residue = ent->saved_cur_residue;
490 spriv->prv_sg = ent->saved_prv_sg;
491 spriv->cur_sg = ent->saved_cur_sg;
492 spriv->tot_residue = ent->saved_tot_residue;
493}
494
495static void esp_write_tgt_config3(struct esp *esp, int tgt)
496{
497 if (esp->rev > ESP100A) {
498 u8 val = esp->target[tgt].esp_config3;
499
500 if (val != esp->prev_cfg3) {
501 esp->prev_cfg3 = val;
502 esp_write8(val, ESP_CFG3);
503 }
504 }
505}
506
507static void esp_write_tgt_sync(struct esp *esp, int tgt)
508{
509 u8 off = esp->target[tgt].esp_offset;
510 u8 per = esp->target[tgt].esp_period;
511
512 if (off != esp->prev_soff) {
513 esp->prev_soff = off;
514 esp_write8(off, ESP_SOFF);
515 }
516 if (per != esp->prev_stp) {
517 esp->prev_stp = per;
518 esp_write8(per, ESP_STP);
519 }
520}
521
522static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
523{
524 if (esp->rev == FASHME) {
525
526 if (dma_len > (1U << 24))
527 dma_len = (1U << 24);
528 } else {
529 u32 base, end;
530
531
532
533
534
535
536
537 if (dma_len > (1U << 16))
538 dma_len = (1U << 16);
539
540
541
542
543 base = dma_addr & ((1U << 24) - 1U);
544 end = base + dma_len;
545 if (end > (1U << 24))
546 end = (1U <<24);
547 dma_len = end - base;
548 }
549 return dma_len;
550}
551
552static int esp_need_to_nego_wide(struct esp_target_data *tp)
553{
554 struct scsi_target *target = tp->starget;
555
556 return spi_width(target) != tp->nego_goal_width;
557}
558
559static int esp_need_to_nego_sync(struct esp_target_data *tp)
560{
561 struct scsi_target *target = tp->starget;
562
563
564 if (!spi_offset(target) && !tp->nego_goal_offset)
565 return 0;
566
567 if (spi_offset(target) == tp->nego_goal_offset &&
568 spi_period(target) == tp->nego_goal_period)
569 return 0;
570
571 return 1;
572}
573
/*
 * Reserve the LUN for this command.  Tagged and untagged commands are
 * mutually exclusive on a LUN: an untagged command must wait for all
 * tagged commands to drain (via lp->hold), and tagged commands are
 * refused while an untagged one is active or a hold is pending.
 *
 * Returns 0 on success, -EBUSY if the command must wait.
 */
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Untagged: only one at a time per LUN. */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* Still draining tagged commands for a previous
			 * untagged request. */
			if (lp->num_tagged)
				return -EBUSY;

			/* Drain complete; claim the LUN. */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Tagged commands in flight: start holding off
			 * new ones until they drain. */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	}

	/* Tagged: blocked by an active untagged command or a hold. */
	if (lp->non_tagged_cmd || lp->hold)
		return -EBUSY;

	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}
616
617static void esp_free_lun_tag(struct esp_cmd_entry *ent,
618 struct esp_lun_data *lp)
619{
620 if (ent->orig_tag[0]) {
621 BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
622 lp->tagged_cmds[ent->orig_tag[1]] = NULL;
623 lp->num_tagged--;
624 } else {
625 BUG_ON(lp->non_tagged_cmd != ent);
626 lp->non_tagged_cmd = NULL;
627 }
628}
629
/*
 * Prepare the command's sense buffer for a REQUEST SENSE data-in phase:
 * either a real dma_map_single(), or (for NO_DMA_MAP hosts) just the
 * kernel virtual address stored in sense_dma.
 */
static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	ent->sense_ptr = ent->cmd->sense_buffer;
	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		ent->sense_dma = (uintptr_t)ent->sense_ptr;
		return;
	}

	ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
					SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}
641
642static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
643{
644 if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
645 dma_unmap_single(esp->dev, ent->sense_dma,
646 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
647 ent->sense_ptr = NULL;
648}
649
650
651
652
653
654
655
656
/*
 * Issue an internally-generated REQUEST SENSE for a command that
 * completed with CHECK CONDITION.  Builds the 6-byte CDB (preceded by
 * an IDENTIFY message) in the command block and selects the target.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);
		esp_map_sense(esp, ent);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	/* Pre-SCSI-3 devices want the LUN replicated in CDB byte 1. */
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}
703
/*
 * Scan the queued-commands list for the first command that can be
 * started now.  Autosense retries go out immediately (untagged);
 * ordinary commands get a queue tag assigned (if the device supports
 * tagging) and must win the LUN via esp_alloc_lun_tag().
 *
 * Returns the chosen entry, or NULL if nothing is issuable.
 */
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		/* Keep the original tag; ent->tag[] may be cleared later
		 * when the tag bytes have been delivered. */
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}
734
/*
 * If the bus is idle, pick the next issuable command, build the
 * selection byte stream (IDENTIFY [+ tag bytes] [+ CDB]) in the command
 * block, and kick off selection.
 *
 * Two selection strategies:
 *  - normal: message + CDB go out in one DMA (SELA / SA3 for tagged);
 *  - select-and-stop (SELAS): used when negotiation messages must be
 *    sent first, for non-standard CDB lengths, or for tagged commands
 *    on ESP100 -- the CDB is fed later, during the message-out phase.
 */
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	bool select_and_stop = false;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	/* Non 6/10/12-byte CDBs cannot be pushed through the one-shot
	 * selection sequence. */
	if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
		select_and_stop = true;

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Broken targets: drop back to the most conservative
		 * settings before (re)negotiating. */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* Already at the goal settings: nothing to negotiate. */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Pending negotiation message forces select-and-stop. */
		if (esp->msg_out_len)
			select_and_stop = true;
	}

build_identify:
	*p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 cannot do the 3-byte-message selection. */
		select_and_stop = true;
	}

	if (select_and_stop) {
		/* CDB bytes are delivered later via cmd_bytes_ptr. */
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			/* Prepend the two tag bytes to any queued
			 * negotiation message. */
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	} else {
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}
877
878static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
879{
880 struct list_head *head = &esp->esp_cmd_pool;
881 struct esp_cmd_entry *ret;
882
883 if (list_empty(head)) {
884 ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
885 } else {
886 ret = list_entry(head->next, struct esp_cmd_entry, list);
887 list_del(&ret->list);
888 memset(ret, 0, sizeof(*ret));
889 }
890 return ret;
891}
892
/* Return a finished command entry to the free pool (never kfree'd here). */
static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}
897
/*
 * Finish a command: release DMA/tag resources, build cmd->result from
 * host_byte (plus the target status when the host byte is DID_OK),
 * complete any error-handler waiter, hand the command back to the SCSI
 * midlayer, and try to start the next queued command.
 */
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned char host_byte)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = 0;
	set_host_byte(cmd, host_byte);
	if (host_byte == DID_OK)
		set_status_byte(cmd, ent->status);

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_unmap_sense(esp, ent);

		/* The sense data was fetched by the driver itself, so
		 * report CHECK CONDITION with valid sense to the
		 * midlayer. */
		cmd->result = SAM_STAT_CHECK_CONDITION;

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}
946
947static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
948{
949 struct scsi_device *dev = ent->cmd->device;
950 struct esp_lun_data *lp = dev->hostdata;
951
952 scsi_track_queue_full(dev, lp->num_tagged - 1);
953}
954
/*
 * queuecommand implementation (called with the host lock held via
 * DEF_SCSI_QCMD): wrap the scsi_cmnd in an esp_cmd_entry, append it to
 * the queued list, and opportunistically start it if the bus is idle.
 *
 * Returns 0 or SCSI_MLQUEUE_HOST_BUSY when no entry can be allocated.
 */
static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	spriv = ESP_CMD_PRIV(cmd);
	spriv->num_sg = 0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}
979
/* Generate esp_queuecommand() as the locked wrapper of the _lck variant. */
static DEF_SCSI_QCMD(esp_queuecommand)
981
/*
 * Check the cached status register for the gross-error bit.  Returns 1
 * (caller should reset) when set, else 0.
 */
static int esp_check_gross_error(struct esp *esp)
{
	if (esp->sreg & ESP_STAT_SPAM) {
		/* Fatal chip-level fault; the specific causes are
		 * chip-dependent (e.g. FIFO over/underflow).  A reset is
		 * the only recovery. */
		shost_printk(KERN_ERR, esp->host,
			     "Gross error sreg[%02x]\n", esp->sreg);
		/* XXX: the original upstream code resets here; this
		 * version leaves the reset to the caller. */
		return 1;
	}
	return 0;
}
998
/*
 * Decide whether the pending interrupt is genuine.
 * Returns: 0 = real interrupt, handle it;
 *          1 = ignore (SCSI-reset latch on sreg-less chips);
 *         -1 = spurious or DMA error, caller should reset.
 */
static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register
		 * cannot be trusted on these revisions; clear it and
		 * treat the IRQ as real. */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			/* A latched SCSI-bus-reset interrupt can arrive
			 * without STAT_INTR; just ignore it. */
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* No chip interrupt pending: either a DMA-engine
			 * error or a truly spurious IRQ. */
			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			return -1;
		}
		break;
	}

	return 0;
}
1035
/* Flag the host for a full bus reset and queue the RESET event; the
 * actual reset happens later in the event loop. */
static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("esp_schedule_reset() from %ps\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}
1043
1044
1045
1046
1047
/*
 * A target with tagged commands outstanding has reselected us; read the
 * two tag-message bytes from the bus to find out which command it is.
 * Involves two bounded busy-wait IRQ polls around a 2-byte DMA transfer
 * of the tag message.  Returns the matching entry, or NULL on any
 * protocol deviation (caller resets the bus).
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	/* Wait (briefly) for the chip to raise the reselect interrupt. */
	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	/* Bus must be in message-in phase to carry the tag bytes. */
	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA the two tag-message bytes into the command block. */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message bytes. */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	/* Wait for the transfer-done interrupt. */
	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}
1138
/*
 * Handle a reselection from a previously disconnected target: decode
 * the reselecting target ID and LUN, locate the in-flight command
 * (possibly via tag messages), and resume it.  Returns 1 on success,
 * 0 after scheduling a bus reset on any inconsistency.
 */
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* HME already delivered target + IDENTIFY byte into the
		 * FIFO snapshot. */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* The first FIFO byte holds the bus ID bits: our own ID
		 * plus exactly one target ID must be set, otherwise the
		 * reselection is malformed. */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* A bus reset arriving just now would be lost by
			 * the flush above; check the latched interrupt. */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}
1223
/*
 * Process the interrupt that ends a selection attempt.  Outcomes:
 *  - RSEL|FDONE: we lost arbitration to a reselecting target; requeue
 *    the command and let esp_reconnect() take over (returns 0);
 *  - DC: selection timed out, target absent (completes with
 *    DID_BAD_TARGET, returns 1);
 *  - FDONE|BSERV: selection succeeded, move to phase processing
 *    (returns 0);
 *  - anything else: schedule a reset.
 */
static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;

	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* DMA engine faulted during selection; abort the command
		 * and reset. */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, DID_ERROR);
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Arbitration lost to an incoming reselection.  Unwind
		 * the command's resources and put it back on the queue
		 * for a later attempt. */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp_unmap_sense(esp, ent);
		}

		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Selection timeout: target did not answer.  Force a
		 * renegotiation if it ever shows up again. */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, DID_BAD_TARGET);
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection worked; older chips may need the FIFO
		 * flushed before the first phase. */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected selection completion ireg[%x]\n", esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}
1317
/*
 * Work out how many bytes of the current data phase actually made it
 * across the bus, from the chip's transfer counter, the FIFO residue,
 * and any residual recorded by the DMA backend.  Returns the byte
 * count, or -1 when an ESP100 race makes the count untrustworthy and
 * the caller must retry.
 */
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	/* In wide mode each FIFO slot holds two bytes. */
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	/* Transfer counter is only valid while STAT_TCNT is clear. */
	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
		if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
			ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;
	bytes_sent -= esp->send_cmd_residual;

	/* A single byte stranded in the FIFO on a wide-transfer read:
	 * pull it out by hand and store it at the right offset in the
	 * destination buffer. */
	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	/* On writes, bytes still in the FIFO never reached the target. */
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Asynchronous mode: always safe to flush. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 race: the phase/FIFO state may still be
			 * in flux right after the interrupt.  If the
			 * FIFO is non-empty in a data phase, the counts
			 * read above cannot be trusted -- signal the
			 * caller to redo the computation. */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
1411
/* Commit a (possibly cleared) synchronous transfer agreement for a
 * target: publish it through the SPI transport class, program the
 * chip's sync offset/period registers, and remember the values for
 * later reconnect handling.  A zero esp_soff means asynchronous.
 */
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			/* scsi_period < 50 marks a fast-SCSI agreement
			 * (period is in 4ns units, so < 200ns — TODO
			 * confirm against the SDTR spec); FASHME drops
			 * the req/ack delay in that case.
			 */
			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	/* Record and program the final offset/period pair. */
	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	/* Negotiation for this target is now complete. */
	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}
1450
/* Handle a MESSAGE REJECT from the target.  If we were negotiating
 * wide transfers, give up on wide and either finish (RATN) or fall
 * back to sync negotiation (SATN).  If we were negotiating sync, fall
 * back to asynchronous.  Any other reject is unexpected and forces a
 * bus reset.
 */
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		/* Target refused WDTR: run narrow from here on. */
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			/* Still need sync negotiation: queue an SDTR. */
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		/* Target refused SDTR: drop to async transfers. */
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
	esp_schedule_reset(esp);
}
1490
/* Handle an incoming SDTR (synchronous data transfer request) message.
 * msg_in[3] is the transfer period, msg_in[4] the REQ/ACK offset.
 * Accepts the agreement via esp_setsync(), renegotiates async when the
 * period is too slow for us, and rejects anything else out of range or
 * unsolicited.
 */
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	/* We only accept SDTR while we are actually negotiating. */
	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	/* Maximum REQ/ACK offset supported by these chips. */
	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			/* Too slow for us: re-negotiate asynchronous. */
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		/* Convert the SDTR period (4ns units) into chip clock
		 * ticks; ccycle is in picoseconds per clock.
		 */
		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			/* NOTE(review): FAS236+ register encoding
			 * adjustment for large values — confirm against
			 * the chip datasheet.
			 */
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	/* Send our own SDTR with the (possibly zeroed) goal values. */
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1541
/* Handle an incoming WDTR (wide data transfer request) message.
 * msg_in[3] encodes the width exponent (8 << n bits).  Only FASHME
 * supports wide transfers; everything else, any width other than
 * 8/16 bits, or an unsolicited WDTR, is rejected.
 */
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	/* Program the agreed width into CONFIG3 and cache it. */
	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	/* Wide settled; move on to sync negotiation if still needed. */
	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1590
1591static void esp_msgin_extended(struct esp *esp)
1592{
1593 struct esp_cmd_entry *ent = esp->active_cmd;
1594 struct scsi_cmnd *cmd = ent->cmd;
1595 struct esp_target_data *tp;
1596 int tgt = cmd->device->id;
1597
1598 tp = &esp->target[tgt];
1599 if (esp->msg_in[2] == EXTENDED_SDTR) {
1600 esp_msgin_sdtr(esp, tp);
1601 return;
1602 }
1603 if (esp->msg_in[2] == EXTENDED_WDTR) {
1604 esp_msgin_wdtr(esp, tp);
1605 return;
1606 }
1607
1608 shost_printk(KERN_INFO, esp->host,
1609 "Unexpected extended msg type %x\n", esp->msg_in[2]);
1610
1611 esp->msg_out[0] = MESSAGE_REJECT;
1612 esp->msg_out_len = 1;
1613 scsi_esp_cmd(esp, ESP_CMD_SATN);
1614}
1615
1616
1617
1618
/* Process the message-in bytes accumulated so far in esp->msg_in[].
 * Returns non-zero when more bytes are still expected (caller keeps
 * collecting), zero once the message has been fully consumed (caller
 * resets msg_in_len).
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* IDENTIFY is only valid during reselection, not here. */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		/* Need at least the length byte, then msg_in[1]+2 bytes
		 * total, before the message can be dispatched.
		 */
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		/* Only a residue of exactly one byte makes sense. */
		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		/* Back the data pointer up by one byte, stepping back to
		 * the previous sg element when we were exactly at the
		 * start of the current one.
		 */
		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg = spriv->prv_sg;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		/* Record the message and let FREE_BUS finish the job. */
		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}
1690
1691static int esp_process_event(struct esp *esp)
1692{
1693 int write, i;
1694
1695again:
1696 write = 0;
1697 esp_log_event("process event %d phase %x\n",
1698 esp->event, esp->sreg & ESP_STAT_PMASK);
1699 switch (esp->event) {
1700 case ESP_EVENT_CHECK_PHASE:
1701 switch (esp->sreg & ESP_STAT_PMASK) {
1702 case ESP_DOP:
1703 esp_event(esp, ESP_EVENT_DATA_OUT);
1704 break;
1705 case ESP_DIP:
1706 esp_event(esp, ESP_EVENT_DATA_IN);
1707 break;
1708 case ESP_STATP:
1709 esp_flush_fifo(esp);
1710 scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1711 esp_event(esp, ESP_EVENT_STATUS);
1712 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1713 return 1;
1714
1715 case ESP_MOP:
1716 esp_event(esp, ESP_EVENT_MSGOUT);
1717 break;
1718
1719 case ESP_MIP:
1720 esp_event(esp, ESP_EVENT_MSGIN);
1721 break;
1722
1723 case ESP_CMDP:
1724 esp_event(esp, ESP_EVENT_CMD_START);
1725 break;
1726
1727 default:
1728 shost_printk(KERN_INFO, esp->host,
1729 "Unexpected phase, sreg=%02x\n",
1730 esp->sreg);
1731 esp_schedule_reset(esp);
1732 return 0;
1733 }
1734 goto again;
1735
1736 case ESP_EVENT_DATA_IN:
1737 write = 1;
1738 fallthrough;
1739
1740 case ESP_EVENT_DATA_OUT: {
1741 struct esp_cmd_entry *ent = esp->active_cmd;
1742 struct scsi_cmnd *cmd = ent->cmd;
1743 dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1744 unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1745
1746 if (esp->rev == ESP100)
1747 scsi_esp_cmd(esp, ESP_CMD_NULL);
1748
1749 if (write)
1750 ent->flags |= ESP_CMD_FLAG_WRITE;
1751 else
1752 ent->flags &= ~ESP_CMD_FLAG_WRITE;
1753
1754 if (esp->ops->dma_length_limit)
1755 dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1756 dma_len);
1757 else
1758 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1759
1760 esp->data_dma_len = dma_len;
1761
1762 if (!dma_len) {
1763 shost_printk(KERN_ERR, esp->host,
1764 "DMA length is zero!\n");
1765 shost_printk(KERN_ERR, esp->host,
1766 "cur adr[%08llx] len[%08x]\n",
1767 (unsigned long long)esp_cur_dma_addr(ent, cmd),
1768 esp_cur_dma_len(ent, cmd));
1769 esp_schedule_reset(esp);
1770 return 0;
1771 }
1772
1773 esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
1774 (unsigned long long)dma_addr, dma_len, write);
1775
1776 esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1777 write, ESP_CMD_DMA | ESP_CMD_TI);
1778 esp_event(esp, ESP_EVENT_DATA_DONE);
1779 break;
1780 }
1781 case ESP_EVENT_DATA_DONE: {
1782 struct esp_cmd_entry *ent = esp->active_cmd;
1783 struct scsi_cmnd *cmd = ent->cmd;
1784 int bytes_sent;
1785
1786 if (esp->ops->dma_error(esp)) {
1787 shost_printk(KERN_INFO, esp->host,
1788 "data done, DMA error, resetting\n");
1789 esp_schedule_reset(esp);
1790 return 0;
1791 }
1792
1793 if (ent->flags & ESP_CMD_FLAG_WRITE) {
1794
1795
1796 esp->ops->dma_drain(esp);
1797 }
1798 esp->ops->dma_invalidate(esp);
1799
1800 if (esp->ireg != ESP_INTR_BSERV) {
1801
1802
1803
1804 shost_printk(KERN_INFO, esp->host,
1805 "data done, not BSERV, resetting\n");
1806 esp_schedule_reset(esp);
1807 return 0;
1808 }
1809
1810 bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1811
1812 esp_log_datadone("data done flgs[%x] sent[%d]\n",
1813 ent->flags, bytes_sent);
1814
1815 if (bytes_sent < 0) {
1816
1817 esp_schedule_reset(esp);
1818 return 0;
1819 }
1820
1821 esp_advance_dma(esp, ent, cmd, bytes_sent);
1822 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1823 goto again;
1824 }
1825
1826 case ESP_EVENT_STATUS: {
1827 struct esp_cmd_entry *ent = esp->active_cmd;
1828
1829 if (esp->ireg & ESP_INTR_FDONE) {
1830 ent->status = esp_read8(ESP_FDATA);
1831 ent->message = esp_read8(ESP_FDATA);
1832 scsi_esp_cmd(esp, ESP_CMD_MOK);
1833 } else if (esp->ireg == ESP_INTR_BSERV) {
1834 ent->status = esp_read8(ESP_FDATA);
1835 ent->message = 0xff;
1836 esp_event(esp, ESP_EVENT_MSGIN);
1837 return 0;
1838 }
1839
1840 if (ent->message != COMMAND_COMPLETE) {
1841 shost_printk(KERN_INFO, esp->host,
1842 "Unexpected message %x in status\n",
1843 ent->message);
1844 esp_schedule_reset(esp);
1845 return 0;
1846 }
1847
1848 esp_event(esp, ESP_EVENT_FREE_BUS);
1849 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1850 break;
1851 }
1852 case ESP_EVENT_FREE_BUS: {
1853 struct esp_cmd_entry *ent = esp->active_cmd;
1854 struct scsi_cmnd *cmd = ent->cmd;
1855
1856 if (ent->message == COMMAND_COMPLETE ||
1857 ent->message == DISCONNECT)
1858 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1859
1860 if (ent->message == COMMAND_COMPLETE) {
1861 esp_log_cmddone("Command done status[%x] message[%x]\n",
1862 ent->status, ent->message);
1863 if (ent->status == SAM_STAT_TASK_SET_FULL)
1864 esp_event_queue_full(esp, ent);
1865
1866 if (ent->status == SAM_STAT_CHECK_CONDITION &&
1867 !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1868 ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1869 esp_autosense(esp, ent);
1870 } else {
1871 esp_cmd_is_done(esp, ent, cmd, DID_OK);
1872 }
1873 } else if (ent->message == DISCONNECT) {
1874 esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
1875 cmd->device->id,
1876 ent->tag[0], ent->tag[1]);
1877
1878 esp->active_cmd = NULL;
1879 esp_maybe_execute_command(esp);
1880 } else {
1881 shost_printk(KERN_INFO, esp->host,
1882 "Unexpected message %x in freebus\n",
1883 ent->message);
1884 esp_schedule_reset(esp);
1885 return 0;
1886 }
1887 if (esp->active_cmd)
1888 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1889 break;
1890 }
1891 case ESP_EVENT_MSGOUT: {
1892 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1893
1894 if (esp_debug & ESP_DEBUG_MSGOUT) {
1895 int i;
1896 printk("ESP: Sending message [ ");
1897 for (i = 0; i < esp->msg_out_len; i++)
1898 printk("%02x ", esp->msg_out[i]);
1899 printk("]\n");
1900 }
1901
1902 if (esp->rev == FASHME) {
1903 int i;
1904
1905
1906 for (i = 0; i < esp->msg_out_len; i++) {
1907 esp_write8(esp->msg_out[i], ESP_FDATA);
1908 esp_write8(0, ESP_FDATA);
1909 }
1910 scsi_esp_cmd(esp, ESP_CMD_TI);
1911 } else {
1912 if (esp->msg_out_len == 1) {
1913 esp_write8(esp->msg_out[0], ESP_FDATA);
1914 scsi_esp_cmd(esp, ESP_CMD_TI);
1915 } else if (esp->flags & ESP_FLAG_USE_FIFO) {
1916 for (i = 0; i < esp->msg_out_len; i++)
1917 esp_write8(esp->msg_out[i], ESP_FDATA);
1918 scsi_esp_cmd(esp, ESP_CMD_TI);
1919 } else {
1920
1921 memcpy(esp->command_block,
1922 esp->msg_out,
1923 esp->msg_out_len);
1924
1925 esp->ops->send_dma_cmd(esp,
1926 esp->command_block_dma,
1927 esp->msg_out_len,
1928 esp->msg_out_len,
1929 0,
1930 ESP_CMD_DMA|ESP_CMD_TI);
1931 }
1932 }
1933 esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1934 break;
1935 }
1936 case ESP_EVENT_MSGOUT_DONE:
1937 if (esp->rev == FASHME) {
1938 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1939 } else {
1940 if (esp->msg_out_len > 1)
1941 esp->ops->dma_invalidate(esp);
1942
1943
1944
1945
1946 if (!(esp->ireg & ESP_INTR_DC))
1947 scsi_esp_cmd(esp, ESP_CMD_NULL);
1948 }
1949
1950 esp->msg_out_len = 0;
1951
1952 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1953 goto again;
1954 case ESP_EVENT_MSGIN:
1955 if (esp->ireg & ESP_INTR_BSERV) {
1956 if (esp->rev == FASHME) {
1957 if (!(esp_read8(ESP_STATUS2) &
1958 ESP_STAT2_FEMPTY))
1959 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1960 } else {
1961 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1962 if (esp->rev == ESP100)
1963 scsi_esp_cmd(esp, ESP_CMD_NULL);
1964 }
1965 scsi_esp_cmd(esp, ESP_CMD_TI);
1966 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1967 return 1;
1968 }
1969 if (esp->ireg & ESP_INTR_FDONE) {
1970 u8 val;
1971
1972 if (esp->rev == FASHME)
1973 val = esp->fifo[0];
1974 else
1975 val = esp_read8(ESP_FDATA);
1976 esp->msg_in[esp->msg_in_len++] = val;
1977
1978 esp_log_msgin("Got msgin byte %x\n", val);
1979
1980 if (!esp_msgin_process(esp))
1981 esp->msg_in_len = 0;
1982
1983 if (esp->rev == FASHME)
1984 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1985
1986 scsi_esp_cmd(esp, ESP_CMD_MOK);
1987
1988
1989 if (esp->event == ESP_EVENT_RESET)
1990 return 0;
1991
1992 if (esp->event != ESP_EVENT_FREE_BUS)
1993 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1994 } else {
1995 shost_printk(KERN_INFO, esp->host,
1996 "MSGIN neither BSERV not FDON, resetting");
1997 esp_schedule_reset(esp);
1998 return 0;
1999 }
2000 break;
2001 case ESP_EVENT_CMD_START:
2002 memcpy(esp->command_block, esp->cmd_bytes_ptr,
2003 esp->cmd_bytes_left);
2004 esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
2005 esp_event(esp, ESP_EVENT_CMD_DONE);
2006 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
2007 break;
2008 case ESP_EVENT_CMD_DONE:
2009 esp->ops->dma_invalidate(esp);
2010 if (esp->ireg & ESP_INTR_BSERV) {
2011 esp_event(esp, ESP_EVENT_CHECK_PHASE);
2012 goto again;
2013 }
2014 esp_schedule_reset(esp);
2015 return 0;
2016
2017 case ESP_EVENT_RESET:
2018 scsi_esp_cmd(esp, ESP_CMD_RS);
2019 break;
2020
2021 default:
2022 shost_printk(KERN_INFO, esp->host,
2023 "Unexpected event %x, resetting\n", esp->event);
2024 esp_schedule_reset(esp);
2025 return 0;
2026 }
2027 return 1;
2028}
2029
/* Fail one outstanding command with DID_RESET and release everything
 * it holds: DMA mappings, the LUN tag, any autosense buffer, and the
 * command entry itself.
 */
static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
		esp_unmap_sense(esp, ent);

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}
2045
/* Per-device callback run after a bus reset: clear the LUN's 'hold'
 * flag so command issue may resume.  By this point no tagged commands
 * can remain outstanding.
 */
static void esp_clear_hold(struct scsi_device *dev, void *data)
{
	struct esp_lun_data *lp = dev->hostdata;

	BUG_ON(lp->num_tagged);
	lp->hold = 0;
}
2053
/* Tear down all driver state after a SCSI bus reset: fail every queued
 * and active command with DID_RESET, and drop all per-target transfer
 * agreements so everything renegotiates from scratch.
 */
static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	/* Commands never issued: no DMA or tag to undo. */
	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide for every target. */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}
2094
2095
/* Core interrupt handling, called with the host lock held.  Samples
 * the chip's status/step/interrupt registers, handles reset and error
 * conditions, and then drives esp_process_event() until it reports
 * that interrupt processing is complete.
 */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	/* Reading ESP_INTRPT acknowledges the interrupt, so the three
	 * registers must be latched in this order.
	 */
	esp->sreg = esp_read8(ESP_STATUS);
	esp->seqreg = esp_read8(ESP_SSTEP);
	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		/* Negative means spurious/ignorable; bail out early. */
		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		/* Wake up an error-handler thread waiting on the reset. */
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	/* FASHME: snapshot FIFO contents before they are disturbed, but
	 * only in states where the FIFO may hold message/status bytes.
	 */
	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		shost_printk(KERN_INFO, esp->host,
			     "unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (esp->ireg & ESP_INTR_RSEL) {
			/* Reselection: wrap up any selection that was in
			 * flight before reconnecting the target.
			 */
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		} else {
			/* Selection finished (or nothing special). */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}
2172
/* Shared IRQ entry point for all ESP front-end drivers.  Takes the
 * host lock, runs __esp_interrupt(), and — while the quick-IRQ flag is
 * set — briefly polls for a follow-up interrupt (up to
 * ESP_QUICKIRQ_LIMIT checks) to avoid the latency of a full IRQ
 * round-trip on back-to-back chip events.
 */
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			/* Short poll for an immediately-following IRQ. */
			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);
2204
/* Identify the chip revision by probing the config registers: ESP100
 * has no functional CFG2, ESP100A has CFG2 but no CFG3, and chips with
 * a working CFG3 are further split by clock conversion factor into
 * ESP236 vs FAST parts.  May be refined later by the front-end driver.
 */
static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	if (esp->config2 == 0) {
		/* Probe CFG2: write known bits and read them back. */
		esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
		esp_write8(esp->config2, ESP_CFG2);

		val = esp_read8(ESP_CFG2);
		val &= ~ESP_CONFIG2_MAGIC;

		esp->config2 = 0;
		if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
			/* CFG2 did not hold our bits: plain ESP100. */
			esp->rev = ESP100;
			return;
		}
	}

	/* Probe CFG3 the same way, with the pattern value 5. */
	esp_set_all_config3(esp, 5);
	esp->prev_cfg3 = 5;
	esp_write8(esp->config2, ESP_CFG2);
	esp_write8(0, ESP_CFG3);
	esp_write8(esp->prev_cfg3, ESP_CFG3);

	val = esp_read8(ESP_CFG3);
	if (val != 5) {
		/* CFG2 works but CFG3 doesn't: ESP100A. */
		esp->rev = ESP100A;
	} else {
		esp_set_all_config3(esp, 0);
		esp->prev_cfg3 = 0;
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		/* Distinguish fast chips by the clock conversion factor
		 * computed earlier in esp_set_clock_params().
		 */
		if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
			esp->rev = FAST;
			esp->sync_defp = SYNC_DEFP_FAST;
		} else {
			esp->rev = ESP236;
		}
	}
}
2257
2258static void esp_init_swstate(struct esp *esp)
2259{
2260 int i;
2261
2262 INIT_LIST_HEAD(&esp->queued_cmds);
2263 INIT_LIST_HEAD(&esp->active_cmds);
2264 INIT_LIST_HEAD(&esp->esp_cmd_pool);
2265
2266
2267
2268
2269
2270 for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2271 esp->target[i].flags = 0;
2272 esp->target[i].nego_goal_period = 0;
2273 esp->target[i].nego_goal_offset = 0;
2274 esp->target[i].nego_goal_width = 0;
2275 esp->target[i].nego_goal_tags = 0;
2276 }
2277}
2278
2279
/* One-time hardware bring-up: reset the DMA engine and the ESP chip,
 * then issue a SCSI bus reset with the reset-interrupt temporarily
 * disabled so it does not trigger our own handler.
 */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA side first. */
	esp->ops->reset_dma(esp);

	/* Then the chip itself. */
	esp_reset_esp(esp);

	/* Suppress the SCSI-reset interrupt while we pulse the bus. */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	/* Restore the normal configuration. */
	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any interrupt the reset pulse generated. */
	esp_read8(ESP_INTRPT);
}
2303
/* Derive the chip timing parameters from the input clock frequency:
 * the clock conversion factor (CCF), the clock cycle time, the
 * selection-timeout value, and the default sync period.
 */
static void esp_set_clock_params(struct esp *esp)
{
	int fhz;
	u8 ccf;

	/* CCF = ceil(MHz / 5): the chip counts its internal timing in
	 * units of roughly 5MHz blocks of the input clock.
	 * NOTE(review): a CCF of 1 is bumped to 2 — presumably the
	 * hardware does not support conversion factor 1; confirm with
	 * the chip documentation.
	 */
	fhz = esp->cfreq;

	ccf = ((fhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	/* Fall back to sane defaults (20MHz, CCF 4) if the front-end
	 * supplied an unusable clock frequency.
	 */
	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
		fhz = 20000000;
		ccf = 4;
	}

	/* The register encodes CCF 8 as 0. */
	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fhz;
	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);	/* clock cycle time */
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);	/* selection timeout */
	esp->sync_defp = SYNC_DEFP_SLOW;
}
2364
/* Human-readable chip names; indexed by esp->rev, so the order here
 * must match the revision enum in esp_scsi.h (NOTE(review): verify
 * when adding revisions).
 */
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"AM53C974",
	"53CF9x-2",
	"FAS100A",
	"FAST",
	"FASHME",
};
2376
/* SPI transport template shared by every ESP host instance; attached
 * in esp_init() and released in esp_exit().
 */
static struct scsi_transport_template *esp_transport_template;
2378
/* Register a fully set-up ESP host with the SCSI midlayer: configure
 * clock parameters, detect the chip revision, initialize software
 * state, reset the hardware/bus, then add and scan the host.  Called
 * by the bus-specific front-end drivers.  Returns 0 or a negative
 * errno from scsi_add_host().
 */
int scsi_esp_register(struct esp *esp)
{
	static int instance;
	int err;

	if (!esp->num_tags)
		esp->num_tags = ESP_DEFAULT_TAGS;
	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
		   esp->host->unique_id, esp->regs, esp->dma_regs,
		   esp->host->irq);
	dev_printk(KERN_INFO, esp->dev,
		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
		   esp->host->unique_id, esp_chip_names[esp->rev],
		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let devices settle after the bootup bus reset. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, esp->dev);
	if (err)
		return err;

	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);
2421
/* Detach an ESP host from the SCSI midlayer; counterpart to
 * scsi_esp_register().
 */
void scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);
2427
2428static int esp_target_alloc(struct scsi_target *starget)
2429{
2430 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2431 struct esp_target_data *tp = &esp->target[starget->id];
2432
2433 tp->starget = starget;
2434
2435 return 0;
2436}
2437
2438static void esp_target_destroy(struct scsi_target *starget)
2439{
2440 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2441 struct esp_target_data *tp = &esp->target[starget->id];
2442
2443 tp->starget = NULL;
2444}
2445
2446static int esp_slave_alloc(struct scsi_device *dev)
2447{
2448 struct esp *esp = shost_priv(dev->host);
2449 struct esp_target_data *tp = &esp->target[dev->id];
2450 struct esp_lun_data *lp;
2451
2452 lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2453 if (!lp)
2454 return -ENOMEM;
2455 dev->hostdata = lp;
2456
2457 spi_min_period(tp->starget) = esp->min_period;
2458 spi_max_offset(tp->starget) = 15;
2459
2460 if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2461 spi_max_width(tp->starget) = 1;
2462 else
2463 spi_max_width(tp->starget) = 0;
2464
2465 return 0;
2466}
2467
2468static int esp_slave_configure(struct scsi_device *dev)
2469{
2470 struct esp *esp = shost_priv(dev->host);
2471 struct esp_target_data *tp = &esp->target[dev->id];
2472
2473 if (dev->tagged_supported)
2474 scsi_change_queue_depth(dev, esp->num_tags);
2475
2476 tp->flags |= ESP_TGT_DISCONNECT;
2477
2478 if (!spi_initial_dv(dev->sdev_target))
2479 spi_dv_device(dev);
2480
2481 return 0;
2482}
2483
2484static void esp_slave_destroy(struct scsi_device *dev)
2485{
2486 struct esp_lun_data *lp = dev->hostdata;
2487
2488 kfree(lp);
2489 dev->hostdata = NULL;
2490}
2491
/* Error-handler abort: try to abort one command.  Queued commands are
 * simply dequeued and completed with DID_ABORT; the currently-active
 * command is aborted by sending ABORT_TASK_SET via ATN and waiting for
 * the interrupt path to complete it.  Returns SUCCESS or FAILED.
 */
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* First lock section is purely diagnostic: dump the current
	 * command state to the log before deciding what to do.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
		     cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		shost_printk(KERN_ERR, esp->host,
			     "Current command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	/* Easy case: command is still queued and was never issued. */
	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Never issued to the chip: just dequeue and complete
		 * with DID_ABORT.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* A message-out is already pending; we cannot safely
		 * queue a second one, so give up.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Queue ABORT_TASK_SET and assert ATN; the interrupt
		 * path will complete eh_done when the abort finishes.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is active but disconnected (or otherwise
		 * not addressable right now); we have no mechanism to
		 * reselect the target just to abort, so fail and let
		 * the midlayer escalate to a bus reset.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* Let the midlayer escalate (bus reset etc.). */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}
2608
/* Error-handler bus reset: issue a SCSI bus reset and wait for the
 * interrupt path (esp_reset_cleanup via __esp_interrupt) to signal
 * completion.  Returns SUCCESS or FAILED on timeout.
 */
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;

	/* Mark the reset in progress so the interrupt handler treats
	 * the resulting SCSI-reset interrupt as expected and runs the
	 * cleanup path, which completes eh_reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}
2643
2644
/* Error-handler host reset (last resort): reset the chip and DMA
 * engine, fail all outstanding commands, then let the bus settle.
 * Always reports SUCCESS.
 */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}
2659
/* Midlayer .info hook: short driver identification string. */
static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}
2664
/* SCSI host template shared by all ESP front-end drivers; front-ends
 * copy or reference this and supply their bus-specific pieces.
 */
struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.max_sectors		= 0xffff,
	/* The driver sleeps for esp_bus_reset_settle itself. */
	.skip_settle_delay	= 1,
};
EXPORT_SYMBOL(scsi_esp_template);
2685
2686static void esp_get_signalling(struct Scsi_Host *host)
2687{
2688 struct esp *esp = shost_priv(host);
2689 enum spi_signal_type type;
2690
2691 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2692 type = SPI_SIGNAL_HVD;
2693 else
2694 type = SPI_SIGNAL_SE;
2695
2696 spi_signalling(host) = type;
2697}
2698
2699static void esp_set_offset(struct scsi_target *target, int offset)
2700{
2701 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2702 struct esp *esp = shost_priv(host);
2703 struct esp_target_data *tp = &esp->target[target->id];
2704
2705 if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2706 tp->nego_goal_offset = 0;
2707 else
2708 tp->nego_goal_offset = offset;
2709 tp->flags |= ESP_TGT_CHECK_NEGO;
2710}
2711
2712static void esp_set_period(struct scsi_target *target, int period)
2713{
2714 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2715 struct esp *esp = shost_priv(host);
2716 struct esp_target_data *tp = &esp->target[target->id];
2717
2718 tp->nego_goal_period = period;
2719 tp->flags |= ESP_TGT_CHECK_NEGO;
2720}
2721
2722static void esp_set_width(struct scsi_target *target, int width)
2723{
2724 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2725 struct esp *esp = shost_priv(host);
2726 struct esp_target_data *tp = &esp->target[target->id];
2727
2728 tp->nego_goal_width = (width ? 1 : 0);
2729 tp->flags |= ESP_TGT_CHECK_NEGO;
2730}
2731
/*
 * Hooks handed to the SPI transport class; the show_* flags make the
 * corresponding negotiated parameters visible in sysfs.
 */
static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};
2741
2742static int __init esp_init(void)
2743{
2744 BUILD_BUG_ON(sizeof(struct scsi_pointer) <
2745 sizeof(struct esp_cmd_priv));
2746
2747 esp_transport_template = spi_attach_transport(&esp_transport_ops);
2748 if (!esp_transport_template)
2749 return -ENODEV;
2750
2751 return 0;
2752}
2753
/* Module unload: detach from the SPI transport class. */
static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}
2758
2759MODULE_DESCRIPTION("ESP SCSI driver core");
2760MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
2761MODULE_LICENSE("GPL");
2762MODULE_VERSION(DRV_VERSION);
2763
2764module_param(esp_bus_reset_settle, int, 0);
2765MODULE_PARM_DESC(esp_bus_reset_settle,
2766 "ESP scsi bus reset delay in seconds");
2767
2768module_param(esp_debug, int, 0);
2769MODULE_PARM_DESC(esp_debug,
2770"ESP bitmapped debugging message enable value:\n"
2771" 0x00000001 Log interrupt events\n"
2772" 0x00000002 Log scsi commands\n"
2773" 0x00000004 Log resets\n"
2774" 0x00000008 Log message in events\n"
2775" 0x00000010 Log message out events\n"
2776" 0x00000020 Log command completion\n"
2777" 0x00000040 Log disconnects\n"
2778" 0x00000080 Log data start\n"
2779" 0x00000100 Log data done\n"
2780" 0x00000200 Log reconnects\n"
2781" 0x00000400 Log auto-sense data\n"
2782);
2783
2784module_init(esp_init);
2785module_exit(esp_exit);
2786
2787#ifdef CONFIG_SCSI_ESP_PIO
2788static inline unsigned int esp_wait_for_fifo(struct esp *esp)
2789{
2790 int i = 500000;
2791
2792 do {
2793 unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
2794
2795 if (fbytes)
2796 return fbytes;
2797
2798 udelay(1);
2799 } while (--i);
2800
2801 shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
2802 esp_read8(ESP_STATUS));
2803 return 0;
2804}
2805
2806static inline int esp_wait_for_intr(struct esp *esp)
2807{
2808 int i = 500000;
2809
2810 do {
2811 esp->sreg = esp_read8(ESP_STATUS);
2812 if (esp->sreg & ESP_STAT_INTR)
2813 return 0;
2814
2815 udelay(1);
2816 } while (--i);
2817
2818 shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
2819 esp->sreg);
2820 return 1;
2821}
2822
/* Depth of the ESP chip FIFO, in bytes. */
#define ESP_FIFO_SIZE 16

/*
 * Transfer @esp_count bytes by programmed I/O through the chip FIFO,
 * issuing chip command @cmd (forced non-DMA).  @write means data flows
 * from the target to memory at @addr; otherwise memory is written out.
 * NOTE(review): @addr appears to be a kernel virtual address cast to
 * u32 by the PIO front ends, and @dma_count is unused here — confirm
 * against the callers.  On exit, esp->send_cmd_error is set on
 * unexpected interrupt status or IRQ timeout, and the number of bytes
 * NOT transferred is recorded in esp->send_cmd_residual.
 */
void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
		      u32 dma_count, int write, u8 cmd)
{
	/* Bus phase at entry; a phase change ends the transfer loop. */
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	/* PIO path: make sure the chip command does not request DMA. */
	cmd &= ~ESP_CMD_DMA;
	esp->send_cmd_error = 0;

	if (write) {
		u8 *dst = (u8 *)addr;
		/*
		 * Interrupt bits considered fatal: anything except the
		 * one expected for this phase (function-complete during
		 * message-in, bus-service otherwise).
		 */
		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);

		scsi_esp_cmd(esp, cmd);

		while (1) {
			/* Bail out if the FIFO never fills (timeout). */
			if (!esp_wait_for_fifo(esp))
				break;

			/* Drain one byte per bus-service cycle. */
			*dst++ = readb(esp->fifo_reg);
			--esp_count;

			if (!esp_count)
				break;

			if (esp_wait_for_intr(esp)) {
				esp->send_cmd_error = 1;
				break;
			}

			/* Target changed phase: stop, residual recorded below. */
			if ((esp->sreg & ESP_STAT_PMASK) != phase)
				break;

			/* Reading ESP_INTRPT also acknowledges the interrupt. */
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & mask) {
				esp->send_cmd_error = 1;
				break;
			}

			/* Message-in bytes must be explicitly accepted. */
			if (phase == ESP_MIP)
				esp_write8(ESP_CMD_MOK, ESP_CMD);

			/* Request the next transfer-information cycle. */
			esp_write8(ESP_CMD_TI, ESP_CMD);
		}
	} else {
		unsigned int n = ESP_FIFO_SIZE;
		u8 *src = (u8 *)addr;

		/* Start from a clean FIFO before loading outgoing bytes. */
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		/* Pre-load up to a full FIFO, then start the chip command. */
		if (n > esp_count)
			n = esp_count;
		writesb(esp->fifo_reg, src, n);
		src += n;
		esp_count -= n;

		scsi_esp_cmd(esp, cmd);

		while (esp_count) {
			if (esp_wait_for_intr(esp)) {
				esp->send_cmd_error = 1;
				break;
			}

			if ((esp->sreg & ESP_STAT_PMASK) != phase)
				break;

			/* Any interrupt other than bus-service is an error. */
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ~ESP_INTR_BSERV) {
				esp->send_cmd_error = 1;
				break;
			}

			/* Top up the FIFO with however much room is left. */
			n = ESP_FIFO_SIZE -
			    (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);

			if (n > esp_count)
				n = esp_count;
			writesb(esp->fifo_reg, src, n);
			src += n;
			esp_count -= n;

			esp_write8(ESP_CMD_TI, ESP_CMD);
		}
	}

	/* Bytes left untransferred; front ends use this for residuals. */
	esp->send_cmd_residual = esp_count;
}
EXPORT_SYMBOL(esp_send_pio_cmd);
#endif
2914