// SPDX-License-Identifier: GPL-2.0-only
/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/irqreturn.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"2.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

/* SCSI bus reset settle time in seconds.  */
static int esp_bus_reset_settle = 3;

static u32 esp_debug;
#define ESP_DEBUG_INTR		0x00000001
#define ESP_DEBUG_SCSICMD	0x00000002
#define ESP_DEBUG_RESET		0x00000004
#define ESP_DEBUG_MSGIN		0x00000008
#define ESP_DEBUG_MSGOUT	0x00000010
#define ESP_DEBUG_CMDDONE	0x00000020
#define ESP_DEBUG_DISCONNECT	0x00000040
#define ESP_DEBUG_DATASTART	0x00000080
#define ESP_DEBUG_DATADONE	0x00000100
#define ESP_DEBUG_RECONNECT	0x00000200
#define ESP_DEBUG_AUTOSENSE	0x00000400
#define ESP_DEBUG_EVENT		0x00000800
#define ESP_DEBUG_COMMAND	0x00001000

#define esp_log_intr(f, a...) \
do {	if (esp_debug & ESP_DEBUG_INTR) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RESET) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGIN) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {	if (esp_debug & ESP_DEBUG_MSGOUT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_CMDDONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DISCONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATASTART) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {	if (esp_debug & ESP_DEBUG_DATADONE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {	if (esp_debug & ESP_DEBUG_RECONNECT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {	if (esp_debug & ESP_DEBUG_AUTOSENSE) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_event(f, a...) \
do {	if (esp_debug & ESP_DEBUG_EVENT) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_command(f, a...) \
do {	if (esp_debug & ESP_DEBUG_COMMAND) \
		shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)
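
/* Usage note: esp_debug is a bitmask, so several classes can be traced
 * at once; for example esp_debug = (ESP_DEBUG_INTR | ESP_DEBUG_EVENT)
 * (0x801) logs interrupt entry plus the event state machine.  This
 * assumes the usual module-parameter hookup for esp_debug, which lives
 * outside this excerpt.
 */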

#define esp_read8(REG)		esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)	esp->ops->esp_write8(esp, VAL, REG)

static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}

void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_log_command("cmd[%02x]\n", val);
	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);

static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
	if (esp->flags & ESP_FLAG_USE_FIFO) {
		int i;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		for (i = 0; i < len; i++)
			esp_write8(esp->command_block[i], ESP_FDATA);
		scsi_esp_cmd(esp, cmd);
	} else {
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		cmd |= ESP_CMD_DMA;
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       len, max_len, 0, cmd);
	}
}
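
/* Note on esp_send_dma_cmd(): with ESP_FLAG_USE_FIFO the command bytes
 * are pushed by PIO through the chip FIFO, otherwise the pre-built
 * command block is handed to the board-specific DMA engine via
 * esp->ops->send_dma_cmd() with ESP_CMD_DMA or'd into the chip command.
 */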

static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}

static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		shost_printk(KERN_INFO, esp->host,
			     "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
			     "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
			     idx,
			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
			     p->val, p->sreg, p->seqreg,
			     p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}

static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				shost_printk(KERN_ALERT, esp->host,
					     "ESP_FF_BYTES will not clear!\n");
				break;
			}
			udelay(1);
		}
	}
}

static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}
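
/* hme_read_fifo() drains two bytes per ESP_FDATA read because FASHME
 * presents a 16-bit wide FIFO; a trailing odd byte is reported via
 * ESP_STAT2_F1BYTE in sreg2 and recovered by pushing a dummy byte
 * before the final read.
 */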

static void esp_set_all_config3(struct esp *esp, u8 val)
{
	int i;

	for (i = 0; i < ESP_MAX_TARGET; i++)
		esp->target[i].esp_config3 = val;
}

/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for a fast ESP chip variant.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME;
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		/*
		 * The AM53c974 chip returns the same ID as FAS236;
		 * try to configure glitch eater.
		 */
		u8 config4 = ESP_CONFIG4_GE1;
		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		if (config4 & ESP_CONFIG4_GE1) {
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
	esp->max_period = (esp->max_period + 3) >> 2;
	esp->min_period = (esp->min_period + 3) >> 2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough */

	case FAS236:
	case PCSCSI:
		/* Fast 236, AM53c974 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any stale interrupt so we start clean. */
	esp_read8(ESP_INTRPT);
	udelay(100);
}

static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int total = 0, i;

	if (cmd->sc_data_direction == DMA_NONE)
		return;

	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		/*
		 * For pseudo DMA and PIO we need the virtual address instead
		 * of a dma address, so perform an identity mapping.
		 */
		spriv->num_sg = scsi_sg_count(cmd);
		for (i = 0; i < spriv->num_sg; i++) {
			sg[i].dma_address = (uintptr_t)sg_virt(&sg[i]);
			total += sg_dma_len(&sg[i]);
		}
	} else {
		spriv->num_sg = scsi_dma_map(cmd);
		for (i = 0; i < spriv->num_sg; i++)
			total += sg_dma_len(&sg[i]);
	}
	spriv->cur_residue = sg_dma_len(sg);
	spriv->cur_sg = sg;
	spriv->tot_residue = total;
}

static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
				   struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return ent->sense_dma +
			(ent->sense_ptr - cmd->sense_buffer);
	}

	return sg_dma_address(p->cur_sg) +
		(sg_dma_len(p->cur_sg) -
		 p->cur_residue);
}

static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
				    struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		return SCSI_SENSE_BUFFERSIZE -
			(ent->sense_ptr - cmd->sense_buffer);
	}
	return p->cur_residue;
}

static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		shost_printk(KERN_ERR, esp->host,
			     "Data transfer overflow.\n");
		shost_printk(KERN_ERR, esp->host,
			     "cur_residue[%d] tot_residue[%d] len[%u]\n",
			     p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	if (!p->cur_residue && p->tot_residue) {
		p->cur_sg++;
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}

static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
		scsi_dma_unmap(cmd);
}

static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->saved_sense_ptr = ent->sense_ptr;
		return;
	}
	ent->saved_cur_residue = spriv->cur_residue;
	ent->saved_cur_sg = spriv->cur_sg;
	ent->saved_tot_residue = spriv->tot_residue;
}

static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr = ent->saved_sense_ptr;
		return;
	}
	spriv->cur_residue = ent->saved_cur_residue;
	spriv->cur_sg = ent->saved_cur_sg;
	spriv->tot_residue = ent->saved_tot_residue;
}

static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}

static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}

static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	if (esp->rev == FASHME) {
		/* Arbitrary segment boundaries, 24-bit counts.  */
		if (dma_len > (1U << 24))
			dma_len = (1U << 24);
	} else {
		u32 base, end;

		/* ESP chip limits other variants by 16-bits of transfer
		 * count.  Actually on FAS100A and FAS236 we could get
		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
		 * in the ESP_CFG2 register but that causes other unwanted
		 * changes so we don't use it currently.
		 */
		if (dma_len > (1U << 16))
			dma_len = (1U << 16);

		/* All of the DMA variants hooked up to these chips
		 * cannot handle crossing a 24-bit address boundary.
		 */
		base = dma_addr & ((1U << 24) - 1U);
		end = base + dma_len;
		if (end > (1U << 24))
			end = (1U << 24);
		dma_len = end - base;
	}
	return dma_len;
}
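
/* Worked example of the boundary clamp: with dma_addr = 0x00ffff00 and
 * dma_len = 0x1000, base = 0xffff00 and end = 0x1000f00 crosses the
 * 24-bit boundary, so end is clamped to 1 << 24 and only 0x100 bytes
 * are programmed; the remainder goes out in a follow-up transfer.
 */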

static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	return spi_width(target) != tp->nego_goal_width;
}

static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
	struct scsi_target *target = tp->starget;

	/* When offset is zero, period is "don't care".  */
	if (!spi_offset(target) && !tp->nego_goal_offset)
		return 0;

	if (spi_offset(target) == tp->nego_goal_offset &&
	    spi_period(target) == tp->nego_goal_period)
		return 0;

	return 1;
}

static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken?  */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	}

	/* Tagged command.  Check that it isn't blocked by a non-tagged one. */
	if (lp->non_tagged_cmd || lp->hold)
		return -EBUSY;

	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}

static void esp_free_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (ent->orig_tag[0]) {
		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
		lp->num_tagged--;
	} else {
		BUG_ON(lp->non_tagged_cmd != ent);
		lp->non_tagged_cmd = NULL;
	}
}
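
/* Invariants kept by the two helpers above: at most one untagged
 * command per LUN (lp->non_tagged_cmd), tagged and untagged commands
 * are never outstanding together, and lp->hold plugs the queue while
 * tagged commands drain so a waiting untagged command can issue.
 */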

static void esp_map_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	ent->sense_ptr = ent->cmd->sense_buffer;
	if (esp->flags & ESP_FLAG_NO_DMA_MAP) {
		ent->sense_dma = (uintptr_t)ent->sense_ptr;
		return;
	}

	ent->sense_dma = dma_map_single(esp->dev, ent->sense_ptr,
					SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
}

static void esp_unmap_sense(struct esp *esp, struct esp_cmd_entry *ent)
{
	if (!(esp->flags & ESP_FLAG_NO_DMA_MAP))
		dma_unmap_single(esp->dev, ent->sense_dma,
				 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
	ent->sense_ptr = NULL;
}

/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);
		esp_map_sense(esp, ent);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}

static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}
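
/* Queued commands are considered strictly in order; an entry whose tag
 * allocation fails (-EBUSY) is skipped rather than blocking the scan,
 * so commands for other LUNs or targets can still issue.
 */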

static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	bool select_and_stop = false;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	if (!(cmd->cmd_len == 6 || cmd->cmd_len == 10 || cmd->cmd_len == 12))
		select_and_stop = true;

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken,
		 * clear all negotiation goals and go straight to
		 * ordinary command execution.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this.  */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* If a negotiation message was built, select with stop
		 * so it can be sent out after the identify.
		 */
		if (esp->msg_out_len)
			select_and_stop = true;
	}

build_identify:
	*p++ = IDENTIFY(tp->flags & ESP_TGT_DISCONNECT, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		select_and_stop = true;
	}

	if (select_and_stop) {
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	} else {
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}

static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
	struct list_head *head = &esp->esp_cmd_pool;
	struct esp_cmd_entry *ret;

	if (list_empty(head)) {
		ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
	} else {
		ret = list_entry(head->next, struct esp_cmd_entry, list);
		list_del(&ret->list);
		memset(ret, 0, sizeof(*ret));
	}
	return ret;
}

static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}

static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_unmap_sense(esp, ent);

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}

static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	return (status | (message << 8) | (driver_code << 16));
}
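
/* compose_result() packs the result word the way the midlayer expects:
 * e.g. compose_result(SAM_STAT_GOOD, COMMAND_COMPLETE, DID_OK) places
 * the SCSI status in bits 0-7, the message byte in bits 8-15 and the
 * DID_* host code in bits 16-23.
 */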

static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}

static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	spriv = ESP_CMD_PRIV(cmd);
	spriv->num_sg = 0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}

static DEF_SCSI_QCMD(esp_queuecommand)

static int esp_check_gross_error(struct esp *esp)
{
	if (esp->sreg & ESP_STAT_SPAM) {
		/* Gross Error, could be one of:
		 * - top of fifo overwritten
		 * - top of command register overwritten
		 * - DMA programmed with wrong direction
		 * - improper phase change
		 */
		shost_printk(KERN_ERR, esp->host,
			     "Gross error sreg[%02x]\n", esp->sreg);
		/* XXX Reset the chip. XXX */
		return 1;
	}
	return 0;
}

static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}

static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("esp_schedule_reset() from %ps\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}

/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message.  */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}

static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}

static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;

	/* No longer selecting.  */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or after selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources and go back into our loop which selects and
		 * reconnects targets.
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp_unmap_sense(esp, ent);
		}

		/* Now that the state is unwound properly, put this
		 * command back onto the issue queue.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a Select And Stop command, negotiation, etc.
		 * we'll do the right thing as we transition to the next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected selection completion ireg[%x]\n", esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}

static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
		if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
			ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;
	bytes_sent -= esp->send_cmd_residual;

	/*
	 * The am53c974 has a DMA 'peculiarity'. The doc states:
	 * In some odd byte conditions, one residual byte will
	 * be left in the SCSI FIFO, and the FIFO Flags will
	 * never count to '0'. When this happens, the residual
	 * byte should be retrieved via PIO following completion
	 * of the BLAST operation.
	 */
	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Zero sync offset (asynchronous transfer), always
		 * flush the fifo.
		 */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return an
			 * error so the caller resets and avoids this transfer
			 * mode for the target in the future.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
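
/* Residual arithmetic example for the function above: on a data-out
 * where 4096 bytes were programmed (data_dma_len), the transfer counter
 * reads back 512, send_cmd_residual is 0, and 2 bytes still sit in the
 * FIFO, so 4096 - 512 - 2 = 3582 bytes actually reached the target.
 */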

static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}

static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	shost_printk(KERN_INFO, esp->host, "Unexpected MESSAGE REJECT\n");
	esp_schedule_reset(esp);
}

static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

static void esp_msgin_extended(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt = cmd->device->id;

	tp = &esp->target[tgt];
	if (esp->msg_in[2] == EXTENDED_SDTR) {
		esp_msgin_sdtr(esp, tp);
		return;
	}
	if (esp->msg_in[2] == EXTENDED_WDTR) {
		esp_msgin_wdtr(esp, tp);
		return;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected extended msg type %x\n", esp->msg_in[2]);

	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}

/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message, and zero
 * if we're done with the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}

static int esp_process_event(struct esp *esp)
{
	int write, i;

again:
	write = 0;
	esp_log_event("process event %d phase %x\n",
		      esp->event, esp->sreg & ESP_STAT_PMASK);
	switch (esp->event) {
	case ESP_EVENT_CHECK_PHASE:
		switch (esp->sreg & ESP_STAT_PMASK) {
		case ESP_DOP:
			esp_event(esp, ESP_EVENT_DATA_OUT);
			break;
		case ESP_DIP:
			esp_event(esp, ESP_EVENT_DATA_IN);
			break;
		case ESP_STATP:
			esp_flush_fifo(esp);
			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
			esp_event(esp, ESP_EVENT_STATUS);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;

		case ESP_MOP:
			esp_event(esp, ESP_EVENT_MSGOUT);
			break;

		case ESP_MIP:
			esp_event(esp, ESP_EVENT_MSGIN);
			break;

		case ESP_CMDP:
			esp_event(esp, ESP_EVENT_CMD_START);
			break;

		default:
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected phase, sreg=%02x\n",
				     esp->sreg);
			esp_schedule_reset(esp);
			return 0;
		}
		goto again;

	case ESP_EVENT_DATA_IN:
		write = 1;
		/* fallthrough */

	case ESP_EVENT_DATA_OUT: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
		unsigned int dma_len = esp_cur_dma_len(ent, cmd);

		if (esp->rev == ESP100)
			scsi_esp_cmd(esp, ESP_CMD_NULL);

		if (write)
			ent->flags |= ESP_CMD_FLAG_WRITE;
		else
			ent->flags &= ~ESP_CMD_FLAG_WRITE;

		if (esp->ops->dma_length_limit)
			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
							     dma_len);
		else
			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);

		esp->data_dma_len = dma_len;

		if (!dma_len) {
			shost_printk(KERN_ERR, esp->host,
				     "DMA length is zero!\n");
			shost_printk(KERN_ERR, esp->host,
				     "cur adr[%08llx] len[%08x]\n",
				     (unsigned long long)esp_cur_dma_addr(ent, cmd),
				     esp_cur_dma_len(ent, cmd));
			esp_schedule_reset(esp);
			return 0;
		}

		esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
				  (unsigned long long)dma_addr, dma_len, write);

		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
				       write, ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_DATA_DONE);
		break;
	}
	case ESP_EVENT_DATA_DONE: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		int bytes_sent;

		if (esp->ops->dma_error(esp)) {
			shost_printk(KERN_INFO, esp->host,
				     "data done, DMA error, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		if (ent->flags & ESP_CMD_FLAG_WRITE) {
			/* XXX parity errors, etc. XXX */
			esp->ops->dma_drain(esp);
		}
		esp->ops->dma_invalidate(esp);

		if (esp->ireg != ESP_INTR_BSERV) {
			/* We should always see exactly a bus-service
			 * interrupt at the end of a successful transfer.
			 */
			shost_printk(KERN_INFO, esp->host,
				     "data done, not BSERV, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

		esp_log_datadone("data done flgs[%x] sent[%d]\n",
				 ent->flags, bytes_sent);

		if (bytes_sent < 0) {
			/* XXX force sync mode for this target XXX */
			esp_schedule_reset(esp);
			return 0;
		}

		esp_advance_dma(esp, ent, cmd, bytes_sent);
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	}

	case ESP_EVENT_STATUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		if (esp->ireg & ESP_INTR_FDONE) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = esp_read8(ESP_FDATA);
			scsi_esp_cmd(esp, ESP_CMD_MOK);
		} else if (esp->ireg == ESP_INTR_BSERV) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = 0xff;
			esp_event(esp, ESP_EVENT_MSGIN);
			return 0;
		}

		if (ent->message != COMMAND_COMPLETE) {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in status\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}

		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_FREE_BUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;

		if (ent->message == COMMAND_COMPLETE ||
		    ent->message == DISCONNECT)
			scsi_esp_cmd(esp, ESP_CMD_ESEL);

		if (ent->message == COMMAND_COMPLETE) {
			esp_log_cmddone("Command done status[%x] message[%x]\n",
					ent->status, ent->message);
			if (ent->status == SAM_STAT_TASK_SET_FULL)
				esp_event_queue_full(esp, ent);

			if (ent->status == SAM_STAT_CHECK_CONDITION &&
			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
				esp_autosense(esp, ent);
			} else {
				esp_cmd_is_done(esp, ent, cmd,
						compose_result(ent->status,
							       ent->message,
							       DID_OK));
			}
		} else if (ent->message == DISCONNECT) {
			esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
					   cmd->device->id,
					   ent->tag[0], ent->tag[1]);

			esp->active_cmd = NULL;
			esp_maybe_execute_command(esp);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in freebus\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}
		if (esp->active_cmd)
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_MSGOUT: {
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_debug & ESP_DEBUG_MSGOUT) {
			int i;
			printk("ESP: Sending message [ ");
			for (i = 0; i < esp->msg_out_len; i++)
				printk("%02x ", esp->msg_out[i]);
			printk("]\n");
		}

		if (esp->rev == FASHME) {
			int i;

			/* Always use the fifo.  */
			for (i = 0; i < esp->msg_out_len; i++) {
				esp_write8(esp->msg_out[i], ESP_FDATA);
				esp_write8(0, ESP_FDATA);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
		} else {
			if (esp->msg_out_len == 1) {
				esp_write8(esp->msg_out[0], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else if (esp->flags & ESP_FLAG_USE_FIFO) {
				for (i = 0; i < esp->msg_out_len; i++)
					esp_write8(esp->msg_out[i], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else {
				/* Use DMA. */
				memcpy(esp->command_block,
				       esp->msg_out,
				       esp->msg_out_len);

				esp->ops->send_dma_cmd(esp,
						       esp->command_block_dma,
						       esp->msg_out_len,
						       esp->msg_out_len,
						       0,
						       ESP_CMD_DMA|ESP_CMD_TI);
			}
		}
		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
		break;
	}
	case ESP_EVENT_MSGOUT_DONE:
		if (esp->rev == FASHME) {
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		} else {
			if (esp->msg_out_len > 1)
				esp->ops->dma_invalidate(esp);

			/* XXX if the chip went into disconnected mode,
			 * we can't run the phase state machine anyway.
			 */
			if (!(esp->ireg & ESP_INTR_DC))
				scsi_esp_cmd(esp, ESP_CMD_NULL);
		}

		esp->msg_out_len = 0;

		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	case ESP_EVENT_MSGIN:
		if (esp->ireg & ESP_INTR_BSERV) {
			if (esp->rev == FASHME) {
				if (!(esp_read8(ESP_STATUS2) &
				      ESP_STAT2_FEMPTY))
					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
			} else {
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
				if (esp->rev == ESP100)
					scsi_esp_cmd(esp, ESP_CMD_NULL);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;
		}
		if (esp->ireg & ESP_INTR_FDONE) {
			u8 val;

			if (esp->rev == FASHME)
				val = esp->fifo[0];
			else
				val = esp_read8(ESP_FDATA);
			esp->msg_in[esp->msg_in_len++] = val;

			esp_log_msgin("Got msgin byte %x\n", val);

			if (!esp_msgin_process(esp))
				esp->msg_in_len = 0;

			if (esp->rev == FASHME)
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);

			scsi_esp_cmd(esp, ESP_CMD_MOK);

			/* Check whether a bus reset is to be done next */
			if (esp->event == ESP_EVENT_RESET)
				return 0;

			if (esp->event != ESP_EVENT_FREE_BUS)
				esp_event(esp, ESP_EVENT_CHECK_PHASE);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "MSGIN neither BSERV nor FDONE, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}
		break;
	case ESP_EVENT_CMD_START:
		memcpy(esp->command_block, esp->cmd_bytes_ptr,
		       esp->cmd_bytes_left);
		esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_CMD_DONE);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	case ESP_EVENT_CMD_DONE:
		esp->ops->dma_invalidate(esp);
		if (esp->ireg & ESP_INTR_BSERV) {
			esp_event(esp, ESP_EVENT_CHECK_PHASE);
			goto again;
		}
		esp_schedule_reset(esp);
		return 0;

	case ESP_EVENT_RESET:
		scsi_esp_cmd(esp, ESP_CMD_RS);
		break;

	default:
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected event %x, resetting\n", esp->event);
		esp_schedule_reset(esp);
		return 0;
	}
	return 1;
}
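
/* esp_process_event() returns 0 when it queued a follow-up event and
 * wants to be invoked again immediately, and 1 once the interrupt has
 * been fully serviced; see the while (!intr_done) loop at the end of
 * __esp_interrupt().
 */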

static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
		esp_unmap_sense(esp, ent);

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}

static void esp_clear_hold(struct scsi_device *dev, void *data)
{
	struct esp_lun_data *lp = dev->hostdata;

	BUG_ON(lp->num_tagged);
	lp->hold = 0;
}

static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers.  */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}

/* Runs under host->lock */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	/*
	 * Once INTRPT is read STATUS and SSTEP are cleared.
	 */
	esp->sreg = esp_read8(ESP_STATUS);
	esp->seqreg = esp_read8(ESP_SSTEP);
	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		shost_printk(KERN_INFO, esp->host,
			     "unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (esp->ireg & ESP_INTR_RSEL) {
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		} else {
			/* Some combination of FDONE, BSERV, DC.  */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		}
	}
	while (!intr_done)
		intr_done = esp_process_event(esp);
}

irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);
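
/* The loop above implements the QUICKIRQ optimization: when the state
 * machine expects another interrupt almost immediately it sets
 * ESP_FLAG_QUICKIRQ_CHECK, and we briefly poll irq_pending() instead of
 * paying for a full interrupt round-trip.
 */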

static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	if (esp->config2 == 0) {
		esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
		esp_write8(esp->config2, ESP_CFG2);

		val = esp_read8(ESP_CFG2);
		val &= ~ESP_CONFIG2_MAGIC;

		esp->config2 = 0;
		if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
			/*
			 * If what we write to cfg2 does not come back,
			 * cfg2 is not implemented.
			 * Therefore this must be a plain esp100.
			 */
			esp->rev = ESP100;
			return;
		}
	}

	esp_set_all_config3(esp, 5);
	esp->prev_cfg3 = 5;
	esp_write8(esp->config2, ESP_CFG2);
	esp_write8(0, ESP_CFG3);
	esp_write8(esp->prev_cfg3, ESP_CFG3);

	val = esp_read8(ESP_CFG3);
	if (val != 5) {
		/* The cfg2 register is implemented, however
		 * cfg3 is not, must be esp100a.
		 */
		esp->rev = ESP100A;
	} else {
		esp_set_all_config3(esp, 0);
		esp->prev_cfg3 = 0;
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		/* All of cfg{1,2,3} implemented, must be one of
		 * the fas variants, figure out which one.
		 */
		if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
			esp->rev = FAST;
			esp->sync_defp = SYNC_DEFP_FAST;
		} else {
			esp->rev = ESP236;
		}
	}
}

static void esp_init_swstate(struct esp *esp)
{
	int i;

	INIT_LIST_HEAD(&esp->queued_cmds);
	INIT_LIST_HEAD(&esp->active_cmds);
	INIT_LIST_HEAD(&esp->esp_cmd_pool);

	/* Start with a clear state, domain validation (via ->slave_configure,
	 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
	 * commands.
	 */
	for (i = 0 ; i < ESP_MAX_TARGET; i++) {
		esp->target[i].flags = 0;
		esp->target[i].nego_goal_period = 0;
		esp->target[i].nego_goal_offset = 0;
		esp->target[i].nego_goal_width = 0;
		esp->target[i].nego_goal_tags = 0;
	}
}

/* This places the ESP into a known state at boot time. */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}

static void esp_set_clock_params(struct esp *esp)
{
	int fhz;
	u8 ccf;

	/* This is getting messy but it has to be done correctly or else
	 * you get weird behavior all over the place.  We are trying to
	 * basically figure out three pieces of information.
	 *
	 * a) Clock Conversion Factor
	 *
	 *    This is a representation of the input crystal clock frequency
	 *    going into the ESP on this machine.  Any operation whose timing
	 *    is longer than 400ns depends on this value being correct.  For
	 *    example, you'll get blips for arbitration/selection during high
	 *    load or with multiple targets if this is not set correctly.
	 *
	 * b) Selection Time-Out
	 *
	 *    The ESP isn't very bright and will arbitrate for the bus and try
	 *    to select a target forever if you let it.  This value tells the
	 *    ESP when it has taken too long to negotiate and that it should
	 *    interrupt the CPU so we can see what happened.  The value is
	 *    computed as follows (from NCR/Symbios chip docs).
	 *
	 *          (Time Out Period) *  (Input Clock)
	 *    STO = ----------------------------------
	 *          (8192) * (Clock Conversion Factor)
	 *
	 *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
	 *
	 * c) Constants for the synchronous offset and transfer period
	 *    register values
	 *
	 *    This entails the smallest and largest sync period we could ever
	 *    handle on this ESP.
	 */
	fhz = esp->cfreq;

	ccf = ((fhz / 1000000) + 4) / 5;
	if (ccf == 1)
		ccf = 2;

	/* If we can't find anything reasonable, just assume 20MHZ.
	 * This is the clock frequency of the older sun4c's where I've
	 * been unable to find the clock-frequency PROM property.  All
	 * other machines provide useful values it seems.
	 */
	if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
		fhz = 20000000;
		ccf = 4;
	}

	esp->cfact = (ccf == 8 ? 0 : ccf);
	esp->cfreq = fhz;
	esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
	esp->ctick = ESP_TICK(ccf, esp->ccycle);
	esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
	esp->sync_defp = SYNC_DEFP_SLOW;
}
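
/* Worked example: a 40MHz crystal gives ccf = ((40 + 4) / 5) = 8,
 * encoded as cfact 0; ESP_HZ_TO_CYCLE() then yields the clock period
 * (assuming the usual definition in esp_scsi.h, 25000 for 40MHz, i.e.
 * 25ns in picosecond units), from which esp_reset_esp() derives the
 * min/max sync period limits.
 */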
2364
2365static const char *esp_chip_names[] = {
2366 "ESP100",
2367 "ESP100A",
2368 "ESP236",
2369 "FAS236",
2370 "FAS100A",
2371 "FAST",
2372 "FASHME",
2373 "AM53C974",
2374};
2375
2376static struct scsi_transport_template *esp_transport_template;
2377
2378int scsi_esp_register(struct esp *esp)
2379{
2380 static int instance;
2381 int err;
2382
2383 if (!esp->num_tags)
2384 esp->num_tags = ESP_DEFAULT_TAGS;
2385 esp->host->transportt = esp_transport_template;
2386 esp->host->max_lun = ESP_MAX_LUN;
2387 esp->host->cmd_per_lun = 2;
2388 esp->host->unique_id = instance;
2389
2390 esp_set_clock_params(esp);
2391
2392 esp_get_revision(esp);
2393
2394 esp_init_swstate(esp);
2395
2396 esp_bootup_reset(esp);
2397
2398 dev_printk(KERN_INFO, esp->dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
2399 esp->host->unique_id, esp->regs, esp->dma_regs,
2400 esp->host->irq);
2401 dev_printk(KERN_INFO, esp->dev,
2402 "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2403 esp->host->unique_id, esp_chip_names[esp->rev],
2404 esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
2405
2406
2407 ssleep(esp_bus_reset_settle);
2408
2409 err = scsi_add_host(esp->host, esp->dev);
2410 if (err)
2411 return err;
2412
2413 instance++;
2414
2415 scsi_scan_host(esp->host);
2416
2417 return 0;
2418}
2419EXPORT_SYMBOL(scsi_esp_register);
2420
2421void scsi_esp_unregister(struct esp *esp)
2422{
2423 scsi_remove_host(esp->host);
2424}
2425EXPORT_SYMBOL(scsi_esp_unregister);

static int esp_target_alloc(struct scsi_target *starget)
{
	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
	struct esp_target_data *tp = &esp->target[starget->id];

	tp->starget = starget;

	return 0;
}

static void esp_target_destroy(struct scsi_target *starget)
{
	struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
	struct esp_target_data *tp = &esp->target[starget->id];

	tp->starget = NULL;
}

static int esp_slave_alloc(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];
	struct esp_lun_data *lp;

	lp = kzalloc(sizeof(*lp), GFP_KERNEL);
	if (!lp)
		return -ENOMEM;
	dev->hostdata = lp;

	spi_min_period(tp->starget) = esp->min_period;
	spi_max_offset(tp->starget) = 15;

	if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
		spi_max_width(tp->starget) = 1;
	else
		spi_max_width(tp->starget) = 0;

	return 0;
}

static int esp_slave_configure(struct scsi_device *dev)
{
	struct esp *esp = shost_priv(dev->host);
	struct esp_target_data *tp = &esp->target[dev->id];

	if (dev->tagged_supported)
		scsi_change_queue_depth(dev, esp->num_tags);

	tp->flags |= ESP_TGT_DISCONNECT;

	if (!spi_initial_dv(dev->sdev_target))
		spi_dv_device(dev);

	return 0;
}

static void esp_slave_destroy(struct scsi_device *dev)
{
	struct esp_lun_data *lp = dev->hostdata;

	kfree(lp);
	dev->hostdata = NULL;
}
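
/* Abort strategy, in order of preference:
 *
 *   1) The command is still on queued_cmds and never touched the
 *      hardware: unlink it, complete it with DID_ABORT, done.
 *   2) The command is the currently active command on the bus: queue
 *      an ABORT_TASK_SET message and assert ATN, then wait (with a
 *      5 second timeout) for the interrupt path to complete eh_done.
 *   3) The command is disconnected: give up and let the midlayer
 *      escalate to a stronger reset.
 */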
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
		     cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		shost_printk(KERN_ERR, esp->host,
			     "Current command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus.  If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected.  This is not easy to
		 * abort.  For now we fail and let the scsi error
		 * handling layer go try a deeper reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target.  Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyways this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}

static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	esp->eh_reset = &eh_reset;

	/* Mark the reset as in progress so the interrupt handler knows
	 * the SCSI bus reset is intentional, then issue the bus reset
	 * command; the reset-cleanup path completes eh_reset once the
	 * reset interrupt has been processed.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}

/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}

static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}
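
/* A couple of notes on the defaults below: this_id 7 is the conventional
 * initiator ID for narrow SCSI, and skip_settle_delay is set because this
 * driver already sleeps for esp_bus_reset_settle seconds after issuing a
 * bus reset, so the midlayer need not add its own settle delay.
 */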
struct scsi_host_template scsi_esp_template = {
	.module = THIS_MODULE,
	.name = "esp",
	.info = esp_info,
	.queuecommand = esp_queuecommand,
	.target_alloc = esp_target_alloc,
	.target_destroy = esp_target_destroy,
	.slave_alloc = esp_slave_alloc,
	.slave_configure = esp_slave_configure,
	.slave_destroy = esp_slave_destroy,
	.eh_abort_handler = esp_eh_abort_handler,
	.eh_bus_reset_handler = esp_eh_bus_reset_handler,
	.eh_host_reset_handler = esp_eh_host_reset_handler,
	.can_queue = 7,
	.this_id = 7,
	.sg_tablesize = SG_ALL,
	.max_sectors = 0xffff,
	.skip_settle_delay = 1,
};
EXPORT_SYMBOL(scsi_esp_template);

static void esp_get_signalling(struct Scsi_Host *host)
{
	struct esp *esp = shost_priv(host);
	enum spi_signal_type type;

	if (esp->flags & ESP_FLAG_DIFFERENTIAL)
		type = SPI_SIGNAL_HVD;
	else
		type = SPI_SIGNAL_SE;

	spi_signalling(host) = type;
}

static void esp_set_offset(struct scsi_target *target, int offset)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	if (esp->flags & ESP_FLAG_DISABLE_SYNC)
		tp->nego_goal_offset = 0;
	else
		tp->nego_goal_offset = offset;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_period(struct scsi_target *target, int period)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_period = period;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}

static void esp_set_width(struct scsi_target *target, int width)
{
	struct Scsi_Host *host = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(host);
	struct esp_target_data *tp = &esp->target[target->id];

	tp->nego_goal_width = (width ? 1 : 0);
	tp->flags |= ESP_TGT_CHECK_NEGO;
}
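
/* SPI transport glue: the set_* hooks above do not touch the hardware
 * directly.  They record the requested offset/period/width as negotiation
 * goals and set ESP_TGT_CHECK_NEGO, so the new parameters are
 * (re)negotiated with the target when its next command is issued.
 */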
static struct spi_function_template esp_transport_ops = {
	.set_offset = esp_set_offset,
	.show_offset = 1,
	.set_period = esp_set_period,
	.show_period = 1,
	.set_width = esp_set_width,
	.show_width = 1,
	.get_signalling = esp_get_signalling,
};

static int __init esp_init(void)
{
	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
		     sizeof(struct esp_cmd_priv));

	esp_transport_template = spi_attach_transport(&esp_transport_ops);
	if (!esp_transport_template)
		return -ENODEV;

	return 0;
}

static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}

MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
		 "ESP scsi bus reset delay in seconds");

module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
"	0x00000001	Log interrupt events\n"
"	0x00000002	Log scsi commands\n"
"	0x00000004	Log resets\n"
"	0x00000008	Log message in events\n"
"	0x00000010	Log message out events\n"
"	0x00000020	Log command completion\n"
"	0x00000040	Log disconnects\n"
"	0x00000080	Log data start\n"
"	0x00000100	Log data done\n"
"	0x00000200	Log reconnects\n"
"	0x00000400	Log auto-sense data\n"
"	0x00000800	Log driver events\n"
"	0x00001000	Log ESP chip commands\n"
);
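
/* Example (illustrative): to enable interrupt and reset logging at module
 * load time, pass esp_debug=0x05 when loading the core module (e.g.
 * "modprobe esp_scsi esp_debug=0x05", assuming the usual esp_scsi module
 * name), or esp_scsi.esp_debug=0x05 on the kernel command line when the
 * driver is built in.
 */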

module_init(esp_init);
module_exit(esp_exit);

#ifdef CONFIG_SCSI_ESP_PIO
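/* PIO fallback for front ends without usable DMA.  The two helpers below
 * busy-wait (up to ~500ms, in 1us steps) for the chip FIFO to contain
 * data, or for an interrupt condition to be latched in the status
 * register.
 */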
static inline unsigned int esp_wait_for_fifo(struct esp *esp)
{
	int i = 500000;

	do {
		unsigned int fbytes = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

		if (fbytes)
			return fbytes;

		udelay(1);
	} while (--i);

	shost_printk(KERN_ERR, esp->host, "FIFO is empty. sreg [%02x]\n",
		     esp_read8(ESP_STATUS));
	return 0;
}

static inline int esp_wait_for_intr(struct esp *esp)
{
	int i = 500000;

	do {
		esp->sreg = esp_read8(ESP_STATUS);
		if (esp->sreg & ESP_STAT_INTR)
			return 0;

		udelay(1);
	} while (--i);

	shost_printk(KERN_ERR, esp->host, "IRQ timeout. sreg [%02x]\n",
		     esp->sreg);
	return 1;
}

#define ESP_FIFO_SIZE 16
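
/* Transfer esp_count bytes between memory at addr and the chip FIFO by
 * programmed I/O, using chip command cmd with its DMA bit stripped.
 * When writing to memory (data in from the target), the FIFO is drained
 * one byte per transfer-information cycle; when reading from memory
 * (data out), the 16-byte FIFO is topped up after each bus-service
 * interrupt.  Bytes not transferred are left in esp->send_cmd_residual,
 * and protocol errors are flagged in esp->send_cmd_error.
 */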
void esp_send_pio_cmd(struct esp *esp, u32 addr, u32 esp_count,
		      u32 dma_count, int write, u8 cmd)
{
	u8 phase = esp->sreg & ESP_STAT_PMASK;

	cmd &= ~ESP_CMD_DMA;
	esp->send_cmd_error = 0;

	if (write) {
		u8 *dst = (u8 *)addr;
		/* Any interrupt other than the one expected for this
		 * phase indicates an error.
		 */
		u8 mask = ~(phase == ESP_MIP ? ESP_INTR_FDONE : ESP_INTR_BSERV);

		scsi_esp_cmd(esp, cmd);

		while (1) {
			if (!esp_wait_for_fifo(esp))
				break;

			*dst++ = readb(esp->fifo_reg);
			--esp_count;

			if (!esp_count)
				break;

			if (esp_wait_for_intr(esp)) {
				esp->send_cmd_error = 1;
				break;
			}

			if ((esp->sreg & ESP_STAT_PMASK) != phase)
				break;

			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & mask) {
				esp->send_cmd_error = 1;
				break;
			}

			/* Message-in bytes must be acknowledged before
			 * the next transfer.
			 */
			if (phase == ESP_MIP)
				esp_write8(ESP_CMD_MOK, ESP_CMD);

			esp_write8(ESP_CMD_TI, ESP_CMD);
		}
	} else {
		unsigned int n = ESP_FIFO_SIZE;
		u8 *src = (u8 *)addr;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		/* Prime the FIFO before starting the command. */
		if (n > esp_count)
			n = esp_count;
		writesb(esp->fifo_reg, src, n);
		src += n;
		esp_count -= n;

		scsi_esp_cmd(esp, cmd);

		while (esp_count) {
			if (esp_wait_for_intr(esp)) {
				esp->send_cmd_error = 1;
				break;
			}

			if ((esp->sreg & ESP_STAT_PMASK) != phase)
				break;

			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ~ESP_INTR_BSERV) {
				esp->send_cmd_error = 1;
				break;
			}

			/* Refill however much space the FIFO has free. */
			n = ESP_FIFO_SIZE -
				(esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES);

			if (n > esp_count)
				n = esp_count;
			writesb(esp->fifo_reg, src, n);
			src += n;
			esp_count -= n;

			esp_write8(ESP_CMD_TI, ESP_CMD);
		}
	}

	esp->send_cmd_residual = esp_count;
}
EXPORT_SYMBOL(esp_send_pio_cmd);
#endif