linux/drivers/scsi/esp_scsi.c
   1/* esp_scsi.c: ESP SCSI driver.
   2 *
   3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
   4 */
   5
   6#include <linux/kernel.h>
   7#include <linux/types.h>
   8#include <linux/slab.h>
   9#include <linux/delay.h>
  10#include <linux/list.h>
  11#include <linux/completion.h>
  12#include <linux/kallsyms.h>
  13#include <linux/module.h>
  14#include <linux/moduleparam.h>
  15#include <linux/init.h>
  16#include <linux/irqreturn.h>
  17
  18#include <asm/irq.h>
  19#include <asm/io.h>
  20#include <asm/dma.h>
  21
  22#include <scsi/scsi.h>
  23#include <scsi/scsi_host.h>
  24#include <scsi/scsi_cmnd.h>
  25#include <scsi/scsi_device.h>
  26#include <scsi/scsi_tcq.h>
  27#include <scsi/scsi_dbg.h>
  28#include <scsi/scsi_transport_spi.h>
  29
  30#include "esp_scsi.h"
  31
  32#define DRV_MODULE_NAME         "esp"
  33#define PFX DRV_MODULE_NAME     ": "
  34#define DRV_VERSION             "2.000"
  35#define DRV_MODULE_RELDATE      "April 19, 2007"
  36
  37/* SCSI bus reset settle time in seconds.  */
  38static int esp_bus_reset_settle = 3;
  39
  40static u32 esp_debug;
  41#define ESP_DEBUG_INTR          0x00000001
  42#define ESP_DEBUG_SCSICMD       0x00000002
  43#define ESP_DEBUG_RESET         0x00000004
  44#define ESP_DEBUG_MSGIN         0x00000008
  45#define ESP_DEBUG_MSGOUT        0x00000010
  46#define ESP_DEBUG_CMDDONE       0x00000020
  47#define ESP_DEBUG_DISCONNECT    0x00000040
  48#define ESP_DEBUG_DATASTART     0x00000080
  49#define ESP_DEBUG_DATADONE      0x00000100
  50#define ESP_DEBUG_RECONNECT     0x00000200
  51#define ESP_DEBUG_AUTOSENSE     0x00000400
  52
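    /* esp_debug is a bitmask of the ESP_DEBUG_* flags above; each esp_log_*()
     * macro below compiles to a printk() only when its flag is set, and to
     * nothing otherwise.
     */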
  53#define esp_log_intr(f, a...) \
  54do {    if (esp_debug & ESP_DEBUG_INTR) \
  55                printk(f, ## a); \
  56} while (0)
  57
  58#define esp_log_reset(f, a...) \
  59do {    if (esp_debug & ESP_DEBUG_RESET) \
  60                printk(f, ## a); \
  61} while (0)
  62
  63#define esp_log_msgin(f, a...) \
  64do {    if (esp_debug & ESP_DEBUG_MSGIN) \
  65                printk(f, ## a); \
  66} while (0)
  67
  68#define esp_log_msgout(f, a...) \
  69do {    if (esp_debug & ESP_DEBUG_MSGOUT) \
  70                printk(f, ## a); \
  71} while (0)
  72
  73#define esp_log_cmddone(f, a...) \
  74do {    if (esp_debug & ESP_DEBUG_CMDDONE) \
  75                printk(f, ## a); \
  76} while (0)
  77
  78#define esp_log_disconnect(f, a...) \
  79do {    if (esp_debug & ESP_DEBUG_DISCONNECT) \
  80                printk(f, ## a); \
  81} while (0)
  82
  83#define esp_log_datastart(f, a...) \
  84do {    if (esp_debug & ESP_DEBUG_DATASTART) \
  85                printk(f, ## a); \
  86} while (0)
  87
  88#define esp_log_datadone(f, a...) \
  89do {    if (esp_debug & ESP_DEBUG_DATADONE) \
  90                printk(f, ## a); \
  91} while (0)
  92
  93#define esp_log_reconnect(f, a...) \
  94do {    if (esp_debug & ESP_DEBUG_RECONNECT) \
  95                printk(f, ## a); \
  96} while (0)
  97
  98#define esp_log_autosense(f, a...) \
  99do {    if (esp_debug & ESP_DEBUG_AUTOSENSE) \
 100                printk(f, ## a); \
 101} while (0)
 102
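    /* Register access is indirected through the ops vector supplied by the
     * bus front-end when it registers the host, since this core is shared by
     * several different bus attachments.
     */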
 103#define esp_read8(REG)          esp->ops->esp_read8(esp, REG)
 104#define esp_write8(VAL,REG)     esp->ops->esp_write8(esp, VAL, REG)
 105
 106static void esp_log_fill_regs(struct esp *esp,
 107                              struct esp_event_ent *p)
 108{
 109        p->sreg = esp->sreg;
 110        p->seqreg = esp->seqreg;
 111        p->sreg2 = esp->sreg2;
 112        p->ireg = esp->ireg;
 113        p->select_state = esp->select_state;
 114        p->event = esp->event;
 115}
 116
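    /* Record each chip command in the driver's small circular event log
     * before writing it to the command register, so esp_dump_cmd_log() can
     * show the recent history when something goes wrong.
     */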
 117void scsi_esp_cmd(struct esp *esp, u8 val)
 118{
 119        struct esp_event_ent *p;
 120        int idx = esp->esp_event_cur;
 121
 122        p = &esp->esp_event_log[idx];
 123        p->type = ESP_EVENT_TYPE_CMD;
 124        p->val = val;
 125        esp_log_fill_regs(esp, p);
 126
 127        esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
 128
 129        esp_write8(val, ESP_CMD);
 130}
 131EXPORT_SYMBOL(scsi_esp_cmd);
 132
 133static void esp_event(struct esp *esp, u8 val)
 134{
 135        struct esp_event_ent *p;
 136        int idx = esp->esp_event_cur;
 137
 138        p = &esp->esp_event_log[idx];
 139        p->type = ESP_EVENT_TYPE_EVENT;
 140        p->val = val;
 141        esp_log_fill_regs(esp, p);
 142
 143        esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
 144
 145        esp->event = val;
 146}
 147
 148static void esp_dump_cmd_log(struct esp *esp)
 149{
 150        int idx = esp->esp_event_cur;
 151        int stop = idx;
 152
 153        printk(KERN_INFO PFX "esp%d: Dumping command log\n",
 154               esp->host->unique_id);
 155        do {
 156                struct esp_event_ent *p = &esp->esp_event_log[idx];
 157
 158                printk(KERN_INFO PFX "esp%d: ent[%d] %s ",
 159                       esp->host->unique_id, idx,
 160                       p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT");
 161
 162                printk("val[%02x] sreg[%02x] seqreg[%02x] "
 163                       "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
 164                       p->val, p->sreg, p->seqreg,
 165                       p->sreg2, p->ireg, p->select_state, p->event);
 166
 167                idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
 168        } while (idx != stop);
 169}
 170
 171static void esp_flush_fifo(struct esp *esp)
 172{
 173        scsi_esp_cmd(esp, ESP_CMD_FLUSH);
 174        if (esp->rev == ESP236) {
 175                int lim = 1000;
 176
 177                while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
 178                        if (--lim == 0) {
 179                                printk(KERN_ALERT PFX "esp%d: ESP_FF_FBYTES "
 180                                       "will not clear!\n",
 181                                       esp->host->unique_id);
 182                                break;
 183                        }
 184                        udelay(1);
 185                }
 186        }
 187}
 188
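    /* On FASHME each FIFO-flags count covers two bytes, so two bytes are
     * drained per count; a leftover odd byte is signalled by ESP_STAT2_F1BYTE
     * and is pushed out by writing a pad byte first.
     */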
 189static void hme_read_fifo(struct esp *esp)
 190{
 191        int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
 192        int idx = 0;
 193
 194        while (fcnt--) {
 195                esp->fifo[idx++] = esp_read8(ESP_FDATA);
 196                esp->fifo[idx++] = esp_read8(ESP_FDATA);
 197        }
 198        if (esp->sreg2 & ESP_STAT2_F1BYTE) {
 199                esp_write8(0, ESP_FDATA);
 200                esp->fifo[idx++] = esp_read8(ESP_FDATA);
 201                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
 202        }
 203        esp->fifo_cnt = idx;
 204}
 205
 206static void esp_set_all_config3(struct esp *esp, u8 val)
 207{
 208        int i;
 209
 210        for (i = 0; i < ESP_MAX_TARGET; i++)
 211                esp->target[i].esp_config3 = val;
 212}
 213
 214/* Reset the ESP chip, _not_ the SCSI bus. */
 215static void esp_reset_esp(struct esp *esp)
 216{
 217        u8 family_code, version;
 218
 219        /* Now reset the ESP chip */
 220        scsi_esp_cmd(esp, ESP_CMD_RC);
 221        scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
 222        if (esp->rev == FAST)
 223                esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
 224        scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
 225
 226        /* This is the only point at which it is reliable to read
 227         * the ID-code for the fast ESP chip variants.
 228         */
 229        esp->max_period = ((35 * esp->ccycle) / 1000);
 230        if (esp->rev == FAST) {
 231                version = esp_read8(ESP_UID);
 232                family_code = (version & 0xf8) >> 3;
 233                if (family_code == 0x02)
 234                        esp->rev = FAS236;
 235                else if (family_code == 0x0a)
 236                        esp->rev = FASHME; /* Version is usually '5'. */
 237                else
 238                        esp->rev = FAS100A;
 239                esp->min_period = ((4 * esp->ccycle) / 1000);
 240        } else {
 241                esp->min_period = ((5 * esp->ccycle) / 1000);
 242        }
 243        esp->max_period = (esp->max_period + 3)>>2;
 244        esp->min_period = (esp->min_period + 3)>>2;
 245
 246        esp_write8(esp->config1, ESP_CFG1);
 247        switch (esp->rev) {
 248        case ESP100:
 249                /* nothing to do */
 250                break;
 251
 252        case ESP100A:
 253                esp_write8(esp->config2, ESP_CFG2);
 254                break;
 255
 256        case ESP236:
 257                /* Slow 236 */
 258                esp_write8(esp->config2, ESP_CFG2);
 259                esp->prev_cfg3 = esp->target[0].esp_config3;
 260                esp_write8(esp->prev_cfg3, ESP_CFG3);
 261                break;
 262
 263        case FASHME:
 264                esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
 265                /* fallthrough... */
 266
 267        case FAS236:
 268                /* Fast 236 or HME */
 269                esp_write8(esp->config2, ESP_CFG2);
 270                if (esp->rev == FASHME) {
 271                        u8 cfg3 = esp->target[0].esp_config3;
 272
 273                        cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
 274                        if (esp->scsi_id >= 8)
 275                                cfg3 |= ESP_CONFIG3_IDBIT3;
 276                        esp_set_all_config3(esp, cfg3);
 277                } else {
 278                        u32 cfg3 = esp->target[0].esp_config3;
 279
 280                        cfg3 |= ESP_CONFIG3_FCLK;
 281                        esp_set_all_config3(esp, cfg3);
 282                }
 283                esp->prev_cfg3 = esp->target[0].esp_config3;
 284                esp_write8(esp->prev_cfg3, ESP_CFG3);
 285                if (esp->rev == FASHME) {
 286                        esp->radelay = 80;
 287                } else {
 288                        if (esp->flags & ESP_FLAG_DIFFERENTIAL)
 289                                esp->radelay = 0;
 290                        else
 291                                esp->radelay = 96;
 292                }
 293                break;
 294
 295        case FAS100A:
 296                /* Fast 100a */
 297                esp_write8(esp->config2, ESP_CFG2);
 298                esp_set_all_config3(esp,
 299                                    (esp->target[0].esp_config3 |
 300                                     ESP_CONFIG3_FCLOCK));
 301                esp->prev_cfg3 = esp->target[0].esp_config3;
 302                esp_write8(esp->prev_cfg3, ESP_CFG3);
 303                esp->radelay = 32;
 304                break;
 305
 306        default:
 307                break;
 308        }
 309
 310        /* Reload the configuration registers */
 311        esp_write8(esp->cfact, ESP_CFACT);
 312
 313        esp->prev_stp = 0;
 314        esp_write8(esp->prev_stp, ESP_STP);
 315
 316        esp->prev_soff = 0;
 317        esp_write8(esp->prev_soff, ESP_SOFF);
 318
 319        esp_write8(esp->neg_defp, ESP_TIMEO);
 320
 321        /* Eat any bitrot in the chip */
 322        esp_read8(ESP_INTRPT);
 323        udelay(100);
 324}
 325
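    /* Map the command's scatterlist for DMA and prime the per-command residue
     * counters (current segment, bytes left in it, and total bytes left) that
     * the data-phase code uses to track progress.
     */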
 326static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
 327{
 328        struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 329        struct scatterlist *sg = scsi_sglist(cmd);
 330        int dir = cmd->sc_data_direction;
 331        int total, i;
 332
 333        if (dir == DMA_NONE)
 334                return;
 335
 336        spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
 337        spriv->cur_residue = sg_dma_len(sg);
 338        spriv->cur_sg = sg;
 339
 340        total = 0;
 341        for (i = 0; i < spriv->u.num_sg; i++)
 342                total += sg_dma_len(&sg[i]);
 343        spriv->tot_residue = total;
 344}
 345
 346static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
 347                                   struct scsi_cmnd *cmd)
 348{
 349        struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 350
 351        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 352                return ent->sense_dma +
 353                        (ent->sense_ptr - cmd->sense_buffer);
 354        }
 355
 356        return sg_dma_address(p->cur_sg) +
 357                (sg_dma_len(p->cur_sg) -
 358                 p->cur_residue);
 359}
 360
 361static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
 362                                    struct scsi_cmnd *cmd)
 363{
 364        struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 365
 366        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 367                return SCSI_SENSE_BUFFERSIZE -
 368                        (ent->sense_ptr - cmd->sense_buffer);
 369        }
 370        return p->cur_residue;
 371}
 372
 373static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
 374                            struct scsi_cmnd *cmd, unsigned int len)
 375{
 376        struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
 377
 378        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 379                ent->sense_ptr += len;
 380                return;
 381        }
 382
 383        p->cur_residue -= len;
 384        p->tot_residue -= len;
 385        if (p->cur_residue < 0 || p->tot_residue < 0) {
 386                printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n",
 387                       esp->host->unique_id);
 388                printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] "
 389                       "len[%u]\n",
 390                       esp->host->unique_id,
 391                       p->cur_residue, p->tot_residue, len);
 392                p->cur_residue = 0;
 393                p->tot_residue = 0;
 394        }
 395        if (!p->cur_residue && p->tot_residue) {
 396                p->cur_sg++;
 397                p->cur_residue = sg_dma_len(p->cur_sg);
 398        }
 399}
 400
 401static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
 402{
 403        struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 404        int dir = cmd->sc_data_direction;
 405
 406        if (dir == DMA_NONE)
 407                return;
 408
 409        esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
 410}
 411
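    /* SAVE/RESTORE DATA POINTER support: snapshot or reload the data-phase
     * position (or the sense buffer position while autosensing) so a
     * disconnect/reselect cycle resumes the transfer at the right spot.
     */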
 412static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
 413{
 414        struct scsi_cmnd *cmd = ent->cmd;
 415        struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 416
 417        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 418                ent->saved_sense_ptr = ent->sense_ptr;
 419                return;
 420        }
 421        ent->saved_cur_residue = spriv->cur_residue;
 422        ent->saved_cur_sg = spriv->cur_sg;
 423        ent->saved_tot_residue = spriv->tot_residue;
 424}
 425
 426static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
 427{
 428        struct scsi_cmnd *cmd = ent->cmd;
 429        struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
 430
 431        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 432                ent->sense_ptr = ent->saved_sense_ptr;
 433                return;
 434        }
 435        spriv->cur_residue = ent->saved_cur_residue;
 436        spriv->cur_sg = ent->saved_cur_sg;
 437        spriv->tot_residue = ent->saved_tot_residue;
 438}
 439
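    /* The chip's select sequence can only feed 6, 10 and 12 byte CDBs on its
     * own; any other length is sent via the select-and-stop path where the
     * driver delivers the command bytes itself.
     */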
 440static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
 441{
 442        if (cmd->cmd_len == 6 ||
 443            cmd->cmd_len == 10 ||
 444            cmd->cmd_len == 12) {
 445                esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
 446        } else {
 447                esp->flags |= ESP_FLAG_DOING_SLOWCMD;
 448        }
 449}
 450
 451static void esp_write_tgt_config3(struct esp *esp, int tgt)
 452{
 453        if (esp->rev > ESP100A) {
 454                u8 val = esp->target[tgt].esp_config3;
 455
 456                if (val != esp->prev_cfg3) {
 457                        esp->prev_cfg3 = val;
 458                        esp_write8(val, ESP_CFG3);
 459                }
 460        }
 461}
 462
 463static void esp_write_tgt_sync(struct esp *esp, int tgt)
 464{
 465        u8 off = esp->target[tgt].esp_offset;
 466        u8 per = esp->target[tgt].esp_period;
 467
 468        if (off != esp->prev_soff) {
 469                esp->prev_soff = off;
 470                esp_write8(off, ESP_SOFF);
 471        }
 472        if (per != esp->prev_stp) {
 473                esp->prev_stp = per;
 474                esp_write8(per, ESP_STP);
 475        }
 476}
 477
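    /* Clamp a single DMA transfer to what the chip's transfer counter and the
     * attached DMA engine's addressing can handle in one go.
     */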
 478static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
 479{
 480        if (esp->rev == FASHME) {
 481                /* Arbitrary segment boundaries, 24-bit counts.  */
 482                if (dma_len > (1U << 24))
 483                        dma_len = (1U << 24);
 484        } else {
 485                u32 base, end;
 486
 487                /* The other ESP variants are limited to a 16-bit transfer
 488                 * count.  Actually on FAS100A and FAS236 we could get
 489                 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
 490                 * in the ESP_CFG2 register but that causes other unwanted
 491                 * changes so we don't use it currently.
 492                 */
 493                if (dma_len > (1U << 16))
 494                        dma_len = (1U << 16);
 495
 496                /* All of the DMA variants hooked up to these chips
 497                 * cannot handle crossing a 24-bit address boundary.
 498                 */
 499                base = dma_addr & ((1U << 24) - 1U);
 500                end = base + dma_len;
 501                if (end > (1U << 24))
 502                        end = (1U << 24);
 503                dma_len = end - base;
 504        }
 505        return dma_len;
 506}
 507
 508static int esp_need_to_nego_wide(struct esp_target_data *tp)
 509{
 510        struct scsi_target *target = tp->starget;
 511
 512        return spi_width(target) != tp->nego_goal_width;
 513}
 514
 515static int esp_need_to_nego_sync(struct esp_target_data *tp)
 516{
 517        struct scsi_target *target = tp->starget;
 518
 519        /* When offset is zero, period is "don't care".  */
 520        if (!spi_offset(target) && !tp->nego_goal_offset)
 521                return 0;
 522
 523        if (spi_offset(target) == tp->nego_goal_offset &&
 524            spi_period(target) == tp->nego_goal_period)
 525                return 0;
 526
 527        return 1;
 528}
 529
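    /* Per-LUN queue bookkeeping.  An untagged command may only run while no
     * tagged commands are outstanding (lp->hold plugs the queue until they
     * drain), and a tagged command claims the slot indexed by its tag value.
     */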
 530static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
 531                             struct esp_lun_data *lp)
 532{
 533        if (!ent->tag[0]) {
 534                /* Non-tagged, slot already taken?  */
 535                if (lp->non_tagged_cmd)
 536                        return -EBUSY;
 537
 538                if (lp->hold) {
 539                        /* We are being held by active tagged
 540                         * commands.
 541                         */
 542                        if (lp->num_tagged)
 543                                return -EBUSY;
 544
 545                        /* Tagged commands completed, we can unplug
 546                         * the queue and run this untagged command.
 547                         */
 548                        lp->hold = 0;
 549                } else if (lp->num_tagged) {
 550                        /* Plug the queue until num_tagged decreases
 551                         * to zero in esp_free_lun_tag.
 552                         */
 553                        lp->hold = 1;
 554                        return -EBUSY;
 555                }
 556
 557                lp->non_tagged_cmd = ent;
 558                return 0;
 559        } else {
 560                /* Tagged command, see if blocked by a
 561                 * non-tagged one.
 562                 */
 563                if (lp->non_tagged_cmd || lp->hold)
 564                        return -EBUSY;
 565        }
 566
 567        BUG_ON(lp->tagged_cmds[ent->tag[1]]);
 568
 569        lp->tagged_cmds[ent->tag[1]] = ent;
 570        lp->num_tagged++;
 571
 572        return 0;
 573}
 574
 575static void esp_free_lun_tag(struct esp_cmd_entry *ent,
 576                             struct esp_lun_data *lp)
 577{
 578        if (ent->tag[0]) {
 579                BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent);
 580                lp->tagged_cmds[ent->tag[1]] = NULL;
 581                lp->num_tagged--;
 582        } else {
 583                BUG_ON(lp->non_tagged_cmd != ent);
 584                lp->non_tagged_cmd = NULL;
 585        }
 586}
 587
 588/* When a contingent allegiance condition is created, we force-feed a
 589 * REQUEST_SENSE command to the device to fetch the sense data.  I
 590 * tried many other schemes, relying on the scsi error handling layer
 591 * to send out the REQUEST_SENSE automatically, but this was difficult
 592 * to get right especially in the presence of applications like smartd
 593 * which use SG_IO to send out their own REQUEST_SENSE commands.
 594 */
 595static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
 596{
 597        struct scsi_cmnd *cmd = ent->cmd;
 598        struct scsi_device *dev = cmd->device;
 599        int tgt, lun;
 600        u8 *p, val;
 601
 602        tgt = dev->id;
 603        lun = dev->lun;
 604
 605
 606        if (!ent->sense_ptr) {
 607                esp_log_autosense("esp%d: Doing auto-sense for "
 608                                  "tgt[%d] lun[%d]\n",
 609                                  esp->host->unique_id, tgt, lun);
 610
 611                ent->sense_ptr = cmd->sense_buffer;
 612                ent->sense_dma = esp->ops->map_single(esp,
 613                                                      ent->sense_ptr,
 614                                                      SCSI_SENSE_BUFFERSIZE,
 615                                                      DMA_FROM_DEVICE);
 616        }
 617        ent->saved_sense_ptr = ent->sense_ptr;
 618
 619        esp->active_cmd = ent;
 620
 621        p = esp->command_block;
 622        esp->msg_out_len = 0;
 623
 624        *p++ = IDENTIFY(0, lun);
 625        *p++ = REQUEST_SENSE;
 626        *p++ = ((dev->scsi_level <= SCSI_2) ?
 627                (lun << 5) : 0);
 628        *p++ = 0;
 629        *p++ = 0;
 630        *p++ = SCSI_SENSE_BUFFERSIZE;
 631        *p++ = 0;
 632
 633        esp->select_state = ESP_SELECT_BASIC;
 634
 635        val = tgt;
 636        if (esp->rev == FASHME)
 637                val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
 638        esp_write8(val, ESP_BUSID);
 639
 640        esp_write_tgt_sync(esp, tgt);
 641        esp_write_tgt_config3(esp, tgt);
 642
 643        val = (p - esp->command_block);
 644
 645        if (esp->rev == FASHME)
 646                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
 647        esp->ops->send_dma_cmd(esp, esp->command_block_dma,
 648                               val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
 649}
 650
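    /* Scan the queued-command list for the first command that can be started
     * now: autosense requests always go, everything else must be able to
     * claim its LUN tag slot.
     */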
 651static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
 652{
 653        struct esp_cmd_entry *ent;
 654
 655        list_for_each_entry(ent, &esp->queued_cmds, list) {
 656                struct scsi_cmnd *cmd = ent->cmd;
 657                struct scsi_device *dev = cmd->device;
 658                struct esp_lun_data *lp = dev->hostdata;
 659
 660                if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 661                        ent->tag[0] = 0;
 662                        ent->tag[1] = 0;
 663                        return ent;
 664                }
 665
 666                if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) {
 667                        ent->tag[0] = 0;
 668                        ent->tag[1] = 0;
 669                }
 670
 671                if (esp_alloc_lun_tag(ent, lp) < 0)
 672                        continue;
 673
 674                return ent;
 675        }
 676
 677        return NULL;
 678}
 679
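    /* If the chip is idle, pick the next issuable command, build the
     * identify, tag and CDB bytes in the command block, program the target's
     * sync and config3 registers, and start selection via DMA.
     */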
 680static void esp_maybe_execute_command(struct esp *esp)
 681{
 682        struct esp_target_data *tp;
 683        struct esp_lun_data *lp;
 684        struct scsi_device *dev;
 685        struct scsi_cmnd *cmd;
 686        struct esp_cmd_entry *ent;
 687        int tgt, lun, i;
 688        u32 val, start_cmd;
 689        u8 *p;
 690
 691        if (esp->active_cmd ||
 692            (esp->flags & ESP_FLAG_RESETTING))
 693                return;
 694
 695        ent = find_and_prep_issuable_command(esp);
 696        if (!ent)
 697                return;
 698
 699        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 700                esp_autosense(esp, ent);
 701                return;
 702        }
 703
 704        cmd = ent->cmd;
 705        dev = cmd->device;
 706        tgt = dev->id;
 707        lun = dev->lun;
 708        tp = &esp->target[tgt];
 709        lp = dev->hostdata;
 710
 711        list_del(&ent->list);
 712        list_add(&ent->list, &esp->active_cmds);
 713
 714        esp->active_cmd = ent;
 715
 716        esp_map_dma(esp, cmd);
 717        esp_save_pointers(esp, ent);
 718
 719        esp_check_command_len(esp, cmd);
 720
 721        p = esp->command_block;
 722
 723        esp->msg_out_len = 0;
 724        if (tp->flags & ESP_TGT_CHECK_NEGO) {
 725                /* Need to negotiate.  If the target is broken
 726                 * go for synchronous transfers and non-wide.
 727                 */
 728                if (tp->flags & ESP_TGT_BROKEN) {
 729                        tp->flags &= ~ESP_TGT_DISCONNECT;
 730                        tp->nego_goal_period = 0;
 731                        tp->nego_goal_offset = 0;
 732                        tp->nego_goal_width = 0;
 733                        tp->nego_goal_tags = 0;
 734                }
 735
 736                /* If the settings are not changing, skip this.  */
 737                if (spi_width(tp->starget) == tp->nego_goal_width &&
 738                    spi_period(tp->starget) == tp->nego_goal_period &&
 739                    spi_offset(tp->starget) == tp->nego_goal_offset) {
 740                        tp->flags &= ~ESP_TGT_CHECK_NEGO;
 741                        goto build_identify;
 742                }
 743
 744                if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
 745                        esp->msg_out_len =
 746                                spi_populate_width_msg(&esp->msg_out[0],
 747                                                       (tp->nego_goal_width ?
 748                                                        1 : 0));
 749                        tp->flags |= ESP_TGT_NEGO_WIDE;
 750                } else if (esp_need_to_nego_sync(tp)) {
 751                        esp->msg_out_len =
 752                                spi_populate_sync_msg(&esp->msg_out[0],
 753                                                      tp->nego_goal_period,
 754                                                      tp->nego_goal_offset);
 755                        tp->flags |= ESP_TGT_NEGO_SYNC;
 756                } else {
 757                        tp->flags &= ~ESP_TGT_CHECK_NEGO;
 758                }
 759
 760                /* Process it like a slow command.  */
 761                if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
 762                        esp->flags |= ESP_FLAG_DOING_SLOWCMD;
 763        }
 764
 765build_identify:
 766        /* If we don't have a lun-data struct yet, we're probing
 767         * so do not disconnect.  Also, do not disconnect unless
 768         * we have a tag on this command.
 769         */
 770        if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
 771                *p++ = IDENTIFY(1, lun);
 772        else
 773                *p++ = IDENTIFY(0, lun);
 774
 775        if (ent->tag[0] && esp->rev == ESP100) {
 776                /* ESP100 lacks select w/atn3 command, use select
 777                 * and stop instead.
 778                 */
 779                esp->flags |= ESP_FLAG_DOING_SLOWCMD;
 780        }
 781
 782        if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
 783                start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
 784                if (ent->tag[0]) {
 785                        *p++ = ent->tag[0];
 786                        *p++ = ent->tag[1];
 787
 788                        start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
 789                }
 790
 791                for (i = 0; i < cmd->cmd_len; i++)
 792                        *p++ = cmd->cmnd[i];
 793
 794                esp->select_state = ESP_SELECT_BASIC;
 795        } else {
 796                esp->cmd_bytes_left = cmd->cmd_len;
 797                esp->cmd_bytes_ptr = &cmd->cmnd[0];
 798
 799                if (ent->tag[0]) {
 800                        for (i = esp->msg_out_len - 1;
 801                             i >= 0; i--)
 802                                esp->msg_out[i + 2] = esp->msg_out[i];
 803                        esp->msg_out[0] = ent->tag[0];
 804                        esp->msg_out[1] = ent->tag[1];
 805                        esp->msg_out_len += 2;
 806                }
 807
 808                start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
 809                esp->select_state = ESP_SELECT_MSGOUT;
 810        }
 811        val = tgt;
 812        if (esp->rev == FASHME)
 813                val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
 814        esp_write8(val, ESP_BUSID);
 815
 816        esp_write_tgt_sync(esp, tgt);
 817        esp_write_tgt_config3(esp, tgt);
 818
 819        val = (p - esp->command_block);
 820
 821        if (esp_debug & ESP_DEBUG_SCSICMD) {
 822                printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
 823                for (i = 0; i < cmd->cmd_len; i++)
 824                        printk("%02x ", cmd->cmnd[i]);
 825                printk("]\n");
 826        }
 827
 828        if (esp->rev == FASHME)
 829                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
 830        esp->ops->send_dma_cmd(esp, esp->command_block_dma,
 831                               val, 16, 0, start_cmd);
 832}
 833
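    /* Command entries are recycled through a private free list; only when the
     * list is empty do we fall back to an atomic allocation.
     */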
 834static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
 835{
 836        struct list_head *head = &esp->esp_cmd_pool;
 837        struct esp_cmd_entry *ret;
 838
 839        if (list_empty(head)) {
 840                ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
 841        } else {
 842                ret = list_entry(head->next, struct esp_cmd_entry, list);
 843                list_del(&ret->list);
 844                memset(ret, 0, sizeof(*ret));
 845        }
 846        return ret;
 847}
 848
 849static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
 850{
 851        list_add(&ent->list, &esp->esp_cmd_pool);
 852}
 853
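    /* Finish a command: unmap its DMA, release its tag slot, fold any
     * autosense data into the result, complete it back to the midlayer and
     * try to start the next queued command.
     */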
 854static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
 855                            struct scsi_cmnd *cmd, unsigned int result)
 856{
 857        struct scsi_device *dev = cmd->device;
 858        int tgt = dev->id;
 859        int lun = dev->lun;
 860
 861        esp->active_cmd = NULL;
 862        esp_unmap_dma(esp, cmd);
 863        esp_free_lun_tag(ent, dev->hostdata);
 864        cmd->result = result;
 865
 866        if (ent->eh_done) {
 867                complete(ent->eh_done);
 868                ent->eh_done = NULL;
 869        }
 870
 871        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
 872                esp->ops->unmap_single(esp, ent->sense_dma,
 873                                       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
 874                ent->sense_ptr = NULL;
 875
 876                /* Restore the message/status bytes to what we actually
 877                 * saw originally.  Also, report that we are providing
 878                 * the sense data.
 879                 */
 880                cmd->result = ((DRIVER_SENSE << 24) |
 881                               (DID_OK << 16) |
 882                               (COMMAND_COMPLETE << 8) |
 883                               (SAM_STAT_CHECK_CONDITION << 0));
 884
 885                ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
 886                if (esp_debug & ESP_DEBUG_AUTOSENSE) {
 887                        int i;
 888
 889                        printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
 890                               esp->host->unique_id, tgt, lun);
 891                        for (i = 0; i < 18; i++)
 892                                printk("%02x ", cmd->sense_buffer[i]);
 893                        printk("]\n");
 894                }
 895        }
 896
 897        cmd->scsi_done(cmd);
 898
 899        list_del(&ent->list);
 900        esp_put_ent(esp, ent);
 901
 902        esp_maybe_execute_command(esp);
 903}
 904
 905static unsigned int compose_result(unsigned int status, unsigned int message,
 906                                   unsigned int driver_code)
 907{
 908        return (status | (message << 8) | (driver_code << 16));
 909}
 910
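    /* QUEUE FULL from the target tells us its real queue depth; let the
     * midlayer track it and throttle the tagged depth accordingly.
     */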
 911static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
 912{
 913        struct scsi_device *dev = ent->cmd->device;
 914        struct esp_lun_data *lp = dev->hostdata;
 915
 916        scsi_track_queue_full(dev, lp->num_tagged - 1);
 917}
 918
 919static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
 920{
 921        struct scsi_device *dev = cmd->device;
 922        struct esp *esp = shost_priv(dev->host);
 923        struct esp_cmd_priv *spriv;
 924        struct esp_cmd_entry *ent;
 925
 926        ent = esp_get_ent(esp);
 927        if (!ent)
 928                return SCSI_MLQUEUE_HOST_BUSY;
 929
 930        ent->cmd = cmd;
 931
 932        cmd->scsi_done = done;
 933
 934        spriv = ESP_CMD_PRIV(cmd);
 935        spriv->u.dma_addr = ~(dma_addr_t)0x0;
 936
 937        list_add_tail(&ent->list, &esp->queued_cmds);
 938
 939        esp_maybe_execute_command(esp);
 940
 941        return 0;
 942}
 943
 944static int esp_check_gross_error(struct esp *esp)
 945{
 946        if (esp->sreg & ESP_STAT_SPAM) {
 947                /* Gross Error, could be one of:
 948                 * - top of fifo overwritten
 949                 * - top of command register overwritten
 950                 * - DMA programmed with wrong direction
 951                 * - improper phase change
 952                 */
 953                printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n",
 954                       esp->host->unique_id, esp->sreg);
 955                /* XXX Reset the chip. XXX */
 956                return 1;
 957        }
 958        return 0;
 959}
 960
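    /* Decide whether this interrupt is real.  Returns 1 if a bus reset was
     * latched while the status register shows nothing pending, -1 for a
     * spurious interrupt or a DMA error, and 0 when normal processing should
     * continue.
     */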
 961static int esp_check_spur_intr(struct esp *esp)
 962{
 963        switch (esp->rev) {
 964        case ESP100:
 965        case ESP100A:
 966                /* The interrupt pending bit of the status register cannot
 967                 * be trusted on these revisions.
 968                 */
 969                esp->sreg &= ~ESP_STAT_INTR;
 970                break;
 971
 972        default:
 973                if (!(esp->sreg & ESP_STAT_INTR)) {
 974                        esp->ireg = esp_read8(ESP_INTRPT);
 975                        if (esp->ireg & ESP_INTR_SR)
 976                                return 1;
 977
 978                        /* If the DMA is indicating interrupt pending and the
 979                         * ESP is not, the only possibility is a DMA error.
 980                         */
 981                        if (!esp->ops->dma_error(esp)) {
 982                                printk(KERN_ERR PFX "esp%d: Spurious irq, "
 983                                       "sreg=%02x.\n",
 984                                       esp->host->unique_id, esp->sreg);
 985                                return -1;
 986                        }
 987
 988                        printk(KERN_ERR PFX "esp%d: DMA error\n",
 989                               esp->host->unique_id);
 990
 991                        /* XXX Reset the chip. XXX */
 992                        return -1;
 993                }
 994                break;
 995        }
 996
 997        return 0;
 998}
 999
1000static void esp_schedule_reset(struct esp *esp)
1001{
1002        esp_log_reset("ESP: esp_schedule_reset() from %p\n",
1003                      __builtin_return_address(0));
1004        esp->flags |= ESP_FLAG_RESETTING;
1005        esp_event(esp, ESP_EVENT_RESET);
1006}
1007
1008/* In order to avoid having to add a special half-reconnected state
1009 * into the driver we just sit here and poll through the rest of
1010 * the reselection process to get the tag message bytes.
1011 */
1012static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
1013                                                    struct esp_lun_data *lp)
1014{
1015        struct esp_cmd_entry *ent;
1016        int i;
1017
1018        if (!lp->num_tagged) {
1019                printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n",
1020                       esp->host->unique_id);
1021                return NULL;
1022        }
1023
1024        esp_log_reconnect("ESP: reconnect tag, ");
1025
1026        for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
1027                if (esp->ops->irq_pending(esp))
1028                        break;
1029        }
1030        if (i == ESP_QUICKIRQ_LIMIT) {
1031                printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n",
1032                       esp->host->unique_id);
1033                return NULL;
1034        }
1035
1036        esp->sreg = esp_read8(ESP_STATUS);
1037        esp->ireg = esp_read8(ESP_INTRPT);
1038
1039        esp_log_reconnect("IRQ(%d:%x:%x), ",
1040                          i, esp->ireg, esp->sreg);
1041
1042        if (esp->ireg & ESP_INTR_DC) {
1043                printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n",
1044                       esp->host->unique_id);
1045                return NULL;
1046        }
1047
1048        if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
1049                printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n",
1050                       esp->host->unique_id, esp->sreg);
1051                return NULL;
1052        }
1053
1054        /* DMA in the tag bytes... */
1055        esp->command_block[0] = 0xff;
1056        esp->command_block[1] = 0xff;
1057        esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1058                               2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);
1059
1060        /* ACK the message.  */
1061        scsi_esp_cmd(esp, ESP_CMD_MOK);
1062
1063        for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
1064                if (esp->ops->irq_pending(esp)) {
1065                        esp->sreg = esp_read8(ESP_STATUS);
1066                        esp->ireg = esp_read8(ESP_INTRPT);
1067                        if (esp->ireg & ESP_INTR_FDONE)
1068                                break;
1069                }
1070                udelay(1);
1071        }
1072        if (i == ESP_RESELECT_TAG_LIMIT) {
1073                printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n",
1074                       esp->host->unique_id);
1075                return NULL;
1076        }
1077        esp->ops->dma_drain(esp);
1078        esp->ops->dma_invalidate(esp);
1079
1080        esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
1081                          i, esp->ireg, esp->sreg,
1082                          esp->command_block[0],
1083                          esp->command_block[1]);
1084
1085        if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
1086            esp->command_block[0] > ORDERED_QUEUE_TAG) {
1087                printk(KERN_ERR PFX "esp%d: Reconnect, bad tag "
1088                       "type %02x.\n",
1089                       esp->host->unique_id, esp->command_block[0]);
1090                return NULL;
1091        }
1092
1093        ent = lp->tagged_cmds[esp->command_block[1]];
1094        if (!ent) {
1095                printk(KERN_ERR PFX "esp%d: Reconnect, no entry for "
1096                       "tag %02x.\n",
1097                       esp->host->unique_id, esp->command_block[1]);
1098                return NULL;
1099        }
1100
1101        return ent;
1102}
1103
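    /* A target has reselected us.  Work out which target and LUN it is
     * (FASHME delivers them in the FIFO, older chips report the target as a
     * bit in the sampled bus ID), find the disconnected command -- polling
     * for its tag bytes if necessary -- and make it the active command again.
     */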
1104static int esp_reconnect(struct esp *esp)
1105{
1106        struct esp_cmd_entry *ent;
1107        struct esp_target_data *tp;
1108        struct esp_lun_data *lp;
1109        struct scsi_device *dev;
1110        int target, lun;
1111
1112        BUG_ON(esp->active_cmd);
1113        if (esp->rev == FASHME) {
1114                /* FASHME puts the target and lun numbers directly
1115                 * into the fifo.
1116                 */
1117                target = esp->fifo[0];
1118                lun = esp->fifo[1] & 0x7;
1119        } else {
1120                u8 bits = esp_read8(ESP_FDATA);
1121
1122                /* Older chips put the lun directly into the fifo, but
1123                 * the target is given as a sample of the arbitration
1124                 * lines on the bus at reselection time.  So we should
1125                 * see the ID of the ESP and the one reconnecting target
1126                 * set in the bitmap.
1127                 */
1128                if (!(bits & esp->scsi_id_mask))
1129                        goto do_reset;
1130                bits &= ~esp->scsi_id_mask;
1131                if (!bits || (bits & (bits - 1)))
1132                        goto do_reset;
1133
1134                target = ffs(bits) - 1;
1135                lun = (esp_read8(ESP_FDATA) & 0x7);
1136
1137                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1138                if (esp->rev == ESP100) {
1139                        u8 ireg = esp_read8(ESP_INTRPT);
1140                        /* This chip has a bug during reselection that can
1141                         * cause a spurious illegal-command interrupt, which
1142                         * we simply ACK here.  Another possibility is a bus
1143                         * reset so we must check for that.
1144                         */
1145                        if (ireg & ESP_INTR_SR)
1146                                goto do_reset;
1147                }
1148                scsi_esp_cmd(esp, ESP_CMD_NULL);
1149        }
1150
1151        esp_write_tgt_sync(esp, target);
1152        esp_write_tgt_config3(esp, target);
1153
1154        scsi_esp_cmd(esp, ESP_CMD_MOK);
1155
1156        if (esp->rev == FASHME)
1157                esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
1158                           ESP_BUSID);
1159
1160        tp = &esp->target[target];
1161        dev = __scsi_device_lookup_by_target(tp->starget, lun);
1162        if (!dev) {
1163                printk(KERN_ERR PFX "esp%d: Reconnect, no lp "
1164                       "tgt[%u] lun[%u]\n",
1165                       esp->host->unique_id, target, lun);
1166                goto do_reset;
1167        }
1168        lp = dev->hostdata;
1169
1170        ent = lp->non_tagged_cmd;
1171        if (!ent) {
1172                ent = esp_reconnect_with_tag(esp, lp);
1173                if (!ent)
1174                        goto do_reset;
1175        }
1176
1177        esp->active_cmd = ent;
1178
1179        if (ent->flags & ESP_CMD_FLAG_ABORT) {
1180                esp->msg_out[0] = ABORT_TASK_SET;
1181                esp->msg_out_len = 1;
1182                scsi_esp_cmd(esp, ESP_CMD_SATN);
1183        }
1184
1185        esp_event(esp, ESP_EVENT_CHECK_PHASE);
1186        esp_restore_pointers(esp, ent);
1187        esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1188        return 1;
1189
1190do_reset:
1191        esp_schedule_reset(esp);
1192        return 0;
1193}
1194
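    /* A selection attempt has finished.  Depending on what the chip latched
     * we either lost out to a reselecting target (back the command out and
     * requeue it), saw no response (complete with DID_BAD_TARGET), or
     * selected successfully and move on to phase processing.
     */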
1195static int esp_finish_select(struct esp *esp)
1196{
1197        struct esp_cmd_entry *ent;
1198        struct scsi_cmnd *cmd;
1199        u8 orig_select_state;
1200
1201        orig_select_state = esp->select_state;
1202
1203        /* No longer selecting.  */
1204        esp->select_state = ESP_SELECT_NONE;
1205
1206        esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
1207        ent = esp->active_cmd;
1208        cmd = ent->cmd;
1209
1210        if (esp->ops->dma_error(esp)) {
1211                /* If we see a DMA error during or as a result of selection,
1212                 * all bets are off.
1213                 */
1214                esp_schedule_reset(esp);
1215                esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
1216                return 0;
1217        }
1218
1219        esp->ops->dma_invalidate(esp);
1220
1221        if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
1222                struct esp_target_data *tp = &esp->target[cmd->device->id];
1223
1224                /* Carefully back out of the selection attempt.  Release
1225                 * resources (such as DMA mapping & TAG) and reset state (such
1226                 * as message out and command delivery variables).
1227                 */
1228                if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1229                        esp_unmap_dma(esp, cmd);
1230                        esp_free_lun_tag(ent, cmd->device->hostdata);
1231                        tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
1232                        esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
1233                        esp->cmd_bytes_ptr = NULL;
1234                        esp->cmd_bytes_left = 0;
1235                } else {
1236                        esp->ops->unmap_single(esp, ent->sense_dma,
1237                                               SCSI_SENSE_BUFFERSIZE,
1238                                               DMA_FROM_DEVICE);
1239                        ent->sense_ptr = NULL;
1240                }
1241
1242                /* Now that the state is unwound properly, put back onto
1243                 * the issue queue.  This command is no longer active.
1244                 */
1245                list_del(&ent->list);
1246                list_add(&ent->list, &esp->queued_cmds);
1247                esp->active_cmd = NULL;
1248
1249                /* Return value ignored by caller, it directly invokes
1250                 * esp_reconnect().
1251                 */
1252                return 0;
1253        }
1254
1255        if (esp->ireg == ESP_INTR_DC) {
1256                struct scsi_device *dev = cmd->device;
1257
1258                /* Disconnect.  Make sure we re-negotiate sync and
1259                 * wide parameters if this target starts responding
1260                 * again in the future.
1261                 */
1262                esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;
1263
1264                scsi_esp_cmd(esp, ESP_CMD_ESEL);
1265                esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
1266                return 1;
1267        }
1268
1269        if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
1270                /* Selection successful.  On pre-FAST chips we have
1271                 * to do a NOP and possibly clean out the FIFO.
1272                 */
1273                if (esp->rev <= ESP236) {
1274                        int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
1275
1276                        scsi_esp_cmd(esp, ESP_CMD_NULL);
1277
1278                        if (!fcnt &&
1279                            (!esp->prev_soff ||
1280                             ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
1281                                esp_flush_fifo(esp);
1282                }
1283
1284                /* If we are doing a slow command, negotiation, etc.
1285                 * we'll do the right thing as we transition to the
1286                 * next phase.
1287                 */
1288                esp_event(esp, ESP_EVENT_CHECK_PHASE);
1289                return 0;
1290        }
1291
1292        printk("ESP: Unexpected selection completion ireg[%x].\n",
1293               esp->ireg);
1294        esp_schedule_reset(esp);
1295        return 0;
1296}
1297
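    /* Work out how many bytes of the just-finished DMA actually made it to or
     * from the target, using the chip's transfer counter and whatever is
     * still sitting in the FIFO, and flush the FIFO when required.  Returns
     * -1 if the ESP100 synchronous-transfer bug described below is detected.
     */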
1298static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
1299                               struct scsi_cmnd *cmd)
1300{
1301        int fifo_cnt, ecount, bytes_sent, flush_fifo;
1302
1303        fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
1304        if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
1305                fifo_cnt <<= 1;
1306
1307        ecount = 0;
1308        if (!(esp->sreg & ESP_STAT_TCNT)) {
1309                ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
1310                          (((unsigned int)esp_read8(ESP_TCMED)) << 8));
1311                if (esp->rev == FASHME)
1312                        ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
1313        }
1314
1315        bytes_sent = esp->data_dma_len;
1316        bytes_sent -= ecount;
1317
1318        if (!(ent->flags & ESP_CMD_FLAG_WRITE))
1319                bytes_sent -= fifo_cnt;
1320
1321        flush_fifo = 0;
1322        if (!esp->prev_soff) {
1323                /* Synchronous data transfer, always flush fifo. */
1324                flush_fifo = 1;
1325        } else {
1326                if (esp->rev == ESP100) {
1327                        u32 fflags, phase;
1328
1329                        /* ESP100 has a chip bug where in the synchronous data
1330                         * phase it can mistake a final long REQ pulse from the
1331                         * target as an extra data byte.  Fun.
1332                         *
1333                         * To detect this case we resample the status register
1334                         * and fifo flags.  If we're still in a data phase and
1335                         * we see spurious chunks in the fifo, we return error
1336                         * to the caller which should reset and set things up
1337                         * such that we only try future transfers to this
1338                         * target in synchronous mode.
1339                         */
1340                        esp->sreg = esp_read8(ESP_STATUS);
1341                        phase = esp->sreg & ESP_STAT_PMASK;
1342                        fflags = esp_read8(ESP_FFLAGS);
1343
1344                        if ((phase == ESP_DOP &&
1345                             (fflags & ESP_FF_ONOTZERO)) ||
1346                            (phase == ESP_DIP &&
1347                             (fflags & ESP_FF_FBYTES)))
1348                                return -1;
1349                }
1350                if (!(ent->flags & ESP_CMD_FLAG_WRITE))
1351                        flush_fifo = 1;
1352        }
1353
1354        if (flush_fifo)
1355                esp_flush_fifo(esp);
1356
1357        return bytes_sent;
1358}
1359
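    /* Commit a negotiated transfer agreement: record it in the SPI transport
     * attributes, program the chip's offset and period registers, and on the
     * fast chips set or clear the fast-SCSI bit in config3.
     */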
1360static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
1361                        u8 scsi_period, u8 scsi_offset,
1362                        u8 esp_stp, u8 esp_soff)
1363{
1364        spi_period(tp->starget) = scsi_period;
1365        spi_offset(tp->starget) = scsi_offset;
1366        spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;
1367
1368        if (esp_soff) {
1369                esp_stp &= 0x1f;
1370                esp_soff |= esp->radelay;
1371                if (esp->rev >= FAS236) {
1372                        u8 bit = ESP_CONFIG3_FSCSI;
1373                        if (esp->rev >= FAS100A)
1374                                bit = ESP_CONFIG3_FAST;
1375
1376                        if (scsi_period < 50) {
1377                                if (esp->rev == FASHME)
1378                                        esp_soff &= ~esp->radelay;
1379                                tp->esp_config3 |= bit;
1380                        } else {
1381                                tp->esp_config3 &= ~bit;
1382                        }
1383                        esp->prev_cfg3 = tp->esp_config3;
1384                        esp_write8(esp->prev_cfg3, ESP_CFG3);
1385                }
1386        }
1387
1388        tp->esp_period = esp->prev_stp = esp_stp;
1389        tp->esp_offset = esp->prev_soff = esp_soff;
1390
1391        esp_write8(esp_soff, ESP_SOFF);
1392        esp_write8(esp_stp, ESP_STP);
1393
1394        tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
1395
1396        spi_display_xfer_agreement(tp->starget);
1397}
1398
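    /* The target rejected our last message-out.  If we were negotiating wide,
     * fall back to narrow and retry sync if still needed; if we were
     * negotiating sync, fall back to async; otherwise abort the task.
     */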
1399static void esp_msgin_reject(struct esp *esp)
1400{
1401        struct esp_cmd_entry *ent = esp->active_cmd;
1402        struct scsi_cmnd *cmd = ent->cmd;
1403        struct esp_target_data *tp;
1404        int tgt;
1405
1406        tgt = cmd->device->id;
1407        tp = &esp->target[tgt];
1408
1409        if (tp->flags & ESP_TGT_NEGO_WIDE) {
1410                tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);
1411
1412                if (!esp_need_to_nego_sync(tp)) {
1413                        tp->flags &= ~ESP_TGT_CHECK_NEGO;
1414                        scsi_esp_cmd(esp, ESP_CMD_RATN);
1415                } else {
1416                        esp->msg_out_len =
1417                                spi_populate_sync_msg(&esp->msg_out[0],
1418                                                      tp->nego_goal_period,
1419                                                      tp->nego_goal_offset);
1420                        tp->flags |= ESP_TGT_NEGO_SYNC;
1421                        scsi_esp_cmd(esp, ESP_CMD_SATN);
1422                }
1423                return;
1424        }
1425
1426        if (tp->flags & ESP_TGT_NEGO_SYNC) {
1427                tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
1428                tp->esp_period = 0;
1429                tp->esp_offset = 0;
1430                esp_setsync(esp, tp, 0, 0, 0, 0);
1431                scsi_esp_cmd(esp, ESP_CMD_RATN);
1432                return;
1433        }
1434
1435        esp->msg_out[0] = ABORT_TASK_SET;
1436        esp->msg_out_len = 1;
1437        scsi_esp_cmd(esp, ESP_CMD_SATN);
1438}
1439
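    /* Incoming SDTR: validate the offered period and offset against what the
     * chip can do, convert the period into chip clock ticks and program the
     * agreement, or reject/counter-propose as appropriate.
     */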
1440static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
1441{
1442        u8 period = esp->msg_in[3];
1443        u8 offset = esp->msg_in[4];
1444        u8 stp;
1445
1446        if (!(tp->flags & ESP_TGT_NEGO_SYNC))
1447                goto do_reject;
1448
1449        if (offset > 15)
1450                goto do_reject;
1451
1452        if (esp->flags & ESP_FLAG_DISABLE_SYNC)
1453                offset = 0;
1454
1455        if (offset) {
1456                int one_clock;
1457
1458                if (period > esp->max_period) {
1459                        period = offset = 0;
1460                        goto do_sdtr;
1461                }
1462                if (period < esp->min_period)
1463                        goto do_reject;
1464
1465                one_clock = esp->ccycle / 1000;
1466                stp = DIV_ROUND_UP(period << 2, one_clock);
1467                if (stp && esp->rev >= FAS236) {
1468                        if (stp >= 50)
1469                                stp--;
1470                }
1471        } else {
1472                stp = 0;
1473        }
1474
1475        esp_setsync(esp, tp, period, offset, stp, offset);
1476        return;
1477
1478do_reject:
1479        esp->msg_out[0] = MESSAGE_REJECT;
1480        esp->msg_out_len = 1;
1481        scsi_esp_cmd(esp, ESP_CMD_SATN);
1482        return;
1483
1484do_sdtr:
1485        tp->nego_goal_period = period;
1486        tp->nego_goal_offset = offset;
1487        esp->msg_out_len =
1488                spi_populate_sync_msg(&esp->msg_out[0],
1489                                      tp->nego_goal_period,
1490                                      tp->nego_goal_offset);
1491        scsi_esp_cmd(esp, ESP_CMD_SATN);
1492}
1493
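    /* Incoming WDTR: only FASHME can do wide transfers and only 8 or 16 bit
     * widths are valid.  A successful wide agreement is followed by sync
     * negotiation if that is still outstanding.
     */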
1494static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
1495{
1496        int size = 8 << esp->msg_in[3];
1497        u8 cfg3;
1498
1499        if (esp->rev != FASHME)
1500                goto do_reject;
1501
1502        if (size != 8 && size != 16)
1503                goto do_reject;
1504
1505        if (!(tp->flags & ESP_TGT_NEGO_WIDE))
1506                goto do_reject;
1507
1508        cfg3 = tp->esp_config3;
1509        if (size == 16) {
1510                tp->flags |= ESP_TGT_WIDE;
1511                cfg3 |= ESP_CONFIG3_EWIDE;
1512        } else {
1513                tp->flags &= ~ESP_TGT_WIDE;
1514                cfg3 &= ~ESP_CONFIG3_EWIDE;
1515        }
1516        tp->esp_config3 = cfg3;
1517        esp->prev_cfg3 = cfg3;
1518        esp_write8(cfg3, ESP_CFG3);
1519
1520        tp->flags &= ~ESP_TGT_NEGO_WIDE;
1521
1522        spi_period(tp->starget) = 0;
1523        spi_offset(tp->starget) = 0;
1524        if (!esp_need_to_nego_sync(tp)) {
1525                tp->flags &= ~ESP_TGT_CHECK_NEGO;
1526                scsi_esp_cmd(esp, ESP_CMD_RATN);
1527        } else {
1528                esp->msg_out_len =
1529                        spi_populate_sync_msg(&esp->msg_out[0],
1530                                              tp->nego_goal_period,
1531                                              tp->nego_goal_offset);
1532                tp->flags |= ESP_TGT_NEGO_SYNC;
1533                scsi_esp_cmd(esp, ESP_CMD_SATN);
1534        }
1535        return;
1536
1537do_reject:
1538        esp->msg_out[0] = MESSAGE_REJECT;
1539        esp->msg_out_len = 1;
1540        scsi_esp_cmd(esp, ESP_CMD_SATN);
1541}
1542
1543static void esp_msgin_extended(struct esp *esp)
1544{
1545        struct esp_cmd_entry *ent = esp->active_cmd;
1546        struct scsi_cmnd *cmd = ent->cmd;
1547        struct esp_target_data *tp;
1548        int tgt = cmd->device->id;
1549
1550        tp = &esp->target[tgt];
1551        if (esp->msg_in[2] == EXTENDED_SDTR) {
1552                esp_msgin_sdtr(esp, tp);
1553                return;
1554        }
1555        if (esp->msg_in[2] == EXTENDED_WDTR) {
1556                esp_msgin_wdtr(esp, tp);
1557                return;
1558        }
1559
1560        printk("ESP: Unexpected extended msg type %x\n",
1561               esp->msg_in[2]);
1562
1563        esp->msg_out[0] = ABORT_TASK_SET;
1564        esp->msg_out_len = 1;
1565        scsi_esp_cmd(esp, ESP_CMD_SATN);
1566}
1567
1568/* Analyze msgin bytes received from target so far.  Return non-zero
1569 * if there are more bytes needed to complete the message.
1570 */
1571static int esp_msgin_process(struct esp *esp)
1572{
1573        u8 msg0 = esp->msg_in[0];
1574        int len = esp->msg_in_len;
1575
1576        if (msg0 & 0x80) {
1577                /* Identify */
1578                printk("ESP: Unexpected msgin identify\n");
1579                return 0;
1580        }
1581
1582        switch (msg0) {
1583        case EXTENDED_MESSAGE:
1584                if (len == 1)
1585                        return 1;
1586                if (len < esp->msg_in[1] + 2)
1587                        return 1;
1588                esp_msgin_extended(esp);
1589                return 0;
1590
1591        case IGNORE_WIDE_RESIDUE: {
1592                struct esp_cmd_entry *ent;
1593                struct esp_cmd_priv *spriv;
1594                if (len == 1)
1595                        return 1;
1596
1597                if (esp->msg_in[1] != 1)
1598                        goto do_reject;
1599
1600                ent = esp->active_cmd;
1601                spriv = ESP_CMD_PRIV(ent->cmd);
1602
1603                if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
1604                        spriv->cur_sg--;
1605                        spriv->cur_residue = 1;
1606                } else
1607                        spriv->cur_residue++;
1608                spriv->tot_residue++;
1609                return 0;
1610        }
1611        case NOP:
1612                return 0;
1613        case RESTORE_POINTERS:
1614                esp_restore_pointers(esp, esp->active_cmd);
1615                return 0;
1616        case SAVE_POINTERS:
1617                esp_save_pointers(esp, esp->active_cmd);
1618                return 0;
1619
1620        case COMMAND_COMPLETE:
1621        case DISCONNECT: {
1622                struct esp_cmd_entry *ent = esp->active_cmd;
1623
1624                ent->message = msg0;
1625                esp_event(esp, ESP_EVENT_FREE_BUS);
1626                esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1627                return 0;
1628        }
1629        case MESSAGE_REJECT:
1630                esp_msgin_reject(esp);
1631                return 0;
1632
1633        default:
1634        do_reject:
1635                esp->msg_out[0] = MESSAGE_REJECT;
1636                esp->msg_out_len = 1;
1637                scsi_esp_cmd(esp, ESP_CMD_SATN);
1638                return 0;
1639        }
1640}
1641
1642static int esp_process_event(struct esp *esp)
1643{
1644        int write;
1645
1646again:
1647        write = 0;
1648        switch (esp->event) {
1649        case ESP_EVENT_CHECK_PHASE:
1650                switch (esp->sreg & ESP_STAT_PMASK) {
1651                case ESP_DOP:
1652                        esp_event(esp, ESP_EVENT_DATA_OUT);
1653                        break;
1654                case ESP_DIP:
1655                        esp_event(esp, ESP_EVENT_DATA_IN);
1656                        break;
1657                case ESP_STATP:
1658                        esp_flush_fifo(esp);
1659                        scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1660                        esp_event(esp, ESP_EVENT_STATUS);
1661                        esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1662                        return 1;
1663
1664                case ESP_MOP:
1665                        esp_event(esp, ESP_EVENT_MSGOUT);
1666                        break;
1667
1668                case ESP_MIP:
1669                        esp_event(esp, ESP_EVENT_MSGIN);
1670                        break;
1671
1672                case ESP_CMDP:
1673                        esp_event(esp, ESP_EVENT_CMD_START);
1674                        break;
1675
1676                default:
1677                        printk("ESP: Unexpected phase, sreg=%02x\n",
1678                               esp->sreg);
1679                        esp_schedule_reset(esp);
1680                        return 0;
1681                }
1682                goto again;
1683                break;
1684
1685        case ESP_EVENT_DATA_IN:
1686                write = 1;
1687                /* fallthru */
1688
1689        case ESP_EVENT_DATA_OUT: {
1690                struct esp_cmd_entry *ent = esp->active_cmd;
1691                struct scsi_cmnd *cmd = ent->cmd;
1692                dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1693                unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1694
1695                if (esp->rev == ESP100)
1696                        scsi_esp_cmd(esp, ESP_CMD_NULL);
1697
1698                if (write)
1699                        ent->flags |= ESP_CMD_FLAG_WRITE;
1700                else
1701                        ent->flags &= ~ESP_CMD_FLAG_WRITE;
1702
1703                if (esp->ops->dma_length_limit)
1704                        dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1705                                                             dma_len);
1706                else
1707                        dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1708
1709                esp->data_dma_len = dma_len;
1710
1711                if (!dma_len) {
1712                        printk(KERN_ERR PFX "esp%d: DMA length is zero!\n",
1713                               esp->host->unique_id);
1714                        printk(KERN_ERR PFX "esp%d: cur adr[%08llx] len[%08x]\n",
1715                               esp->host->unique_id,
1716                               (unsigned long long)esp_cur_dma_addr(ent, cmd),
1717                               esp_cur_dma_len(ent, cmd));
1718                        esp_schedule_reset(esp);
1719                        return 0;
1720                }
1721
1722                esp_log_datastart("ESP: start data addr[%08llx] len[%u] "
1723                                  "write(%d)\n",
1724                                  (unsigned long long)dma_addr, dma_len, write);
1725
1726                esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1727                                       write, ESP_CMD_DMA | ESP_CMD_TI);
1728                esp_event(esp, ESP_EVENT_DATA_DONE);
1729                break;
1730        }
1731        case ESP_EVENT_DATA_DONE: {
1732                struct esp_cmd_entry *ent = esp->active_cmd;
1733                struct scsi_cmnd *cmd = ent->cmd;
1734                int bytes_sent;
1735
1736                if (esp->ops->dma_error(esp)) {
1737                        printk("ESP: data done, DMA error, resetting\n");
1738                        esp_schedule_reset(esp);
1739                        return 0;
1740                }
1741
1742                if (ent->flags & ESP_CMD_FLAG_WRITE) {
1743                        /* XXX parity errors, etc. XXX */
1744
1745                        esp->ops->dma_drain(esp);
1746                }
1747                esp->ops->dma_invalidate(esp);
1748
1749                if (esp->ireg != ESP_INTR_BSERV) {
1750                        /* We should always see exactly a bus-service
1751                         * interrupt at the end of a successful transfer.
1752                         */
1753                        printk("ESP: data done, not BSERV, resetting\n");
1754                        esp_schedule_reset(esp);
1755                        return 0;
1756                }
1757
1758                bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1759
1760                esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n",
1761                                 ent->flags, bytes_sent);
1762
1763                if (bytes_sent < 0) {
1764                        /* XXX force sync mode for this target XXX */
1765                        esp_schedule_reset(esp);
1766                        return 0;
1767                }
1768
1769                esp_advance_dma(esp, ent, cmd, bytes_sent);
1770                esp_event(esp, ESP_EVENT_CHECK_PHASE);
1771                goto again;
1772        }
1773
1774        case ESP_EVENT_STATUS: {
1775                struct esp_cmd_entry *ent = esp->active_cmd;
1776
1777                if (esp->ireg & ESP_INTR_FDONE) {
1778                        ent->status = esp_read8(ESP_FDATA);
1779                        ent->message = esp_read8(ESP_FDATA);
1780                        scsi_esp_cmd(esp, ESP_CMD_MOK);
1781                } else if (esp->ireg == ESP_INTR_BSERV) {
1782                        ent->status = esp_read8(ESP_FDATA);
1783                        ent->message = 0xff;
1784                        esp_event(esp, ESP_EVENT_MSGIN);
1785                        return 0;
1786                }
1787
1788                if (ent->message != COMMAND_COMPLETE) {
1789                        printk("ESP: Unexpected message %x in status\n",
1790                               ent->message);
1791                        esp_schedule_reset(esp);
1792                        return 0;
1793                }
1794
1795                esp_event(esp, ESP_EVENT_FREE_BUS);
1796                esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1797                break;
1798        }
1799        case ESP_EVENT_FREE_BUS: {
1800                struct esp_cmd_entry *ent = esp->active_cmd;
1801                struct scsi_cmnd *cmd = ent->cmd;
1802
1803                if (ent->message == COMMAND_COMPLETE ||
1804                    ent->message == DISCONNECT)
1805                        scsi_esp_cmd(esp, ESP_CMD_ESEL);
1806
1807                if (ent->message == COMMAND_COMPLETE) {
1808                        esp_log_cmddone("ESP: Command done status[%x] "
1809                                        "message[%x]\n",
1810                                        ent->status, ent->message);
1811                        if (ent->status == SAM_STAT_TASK_SET_FULL)
1812                                esp_event_queue_full(esp, ent);
1813
1814                        if (ent->status == SAM_STAT_CHECK_CONDITION &&
1815                            !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1816                                ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1817                                esp_autosense(esp, ent);
1818                        } else {
1819                                esp_cmd_is_done(esp, ent, cmd,
1820                                                compose_result(ent->status,
1821                                                               ent->message,
1822                                                               DID_OK));
1823                        }
1824                } else if (ent->message == DISCONNECT) {
1825                        esp_log_disconnect("ESP: Disconnecting tgt[%d] "
1826                                           "tag[%x:%x]\n",
1827                                           cmd->device->id,
1828                                           ent->tag[0], ent->tag[1]);
1829
1830                        esp->active_cmd = NULL;
1831                        esp_maybe_execute_command(esp);
1832                } else {
1833                        printk("ESP: Unexpected message %x in freebus\n",
1834                               ent->message);
1835                        esp_schedule_reset(esp);
1836                        return 0;
1837                }
1838                if (esp->active_cmd)
1839                        esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1840                break;
1841        }
1842        case ESP_EVENT_MSGOUT: {
1843                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1844
1845                if (esp_debug & ESP_DEBUG_MSGOUT) {
1846                        int i;
1847                        printk("ESP: Sending message [ ");
1848                        for (i = 0; i < esp->msg_out_len; i++)
1849                                printk("%02x ", esp->msg_out[i]);
1850                        printk("]\n");
1851                }
1852
1853                if (esp->rev == FASHME) {
1854                        int i;
1855
1856                        /* Always use the fifo.  */
1857                        for (i = 0; i < esp->msg_out_len; i++) {
1858                                esp_write8(esp->msg_out[i], ESP_FDATA);
1859                                esp_write8(0, ESP_FDATA);
1860                        }
1861                        scsi_esp_cmd(esp, ESP_CMD_TI);
1862                } else {
1863                        if (esp->msg_out_len == 1) {
1864                                esp_write8(esp->msg_out[0], ESP_FDATA);
1865                                scsi_esp_cmd(esp, ESP_CMD_TI);
1866                        } else {
1867                                /* Use DMA. */
1868                                memcpy(esp->command_block,
1869                                       esp->msg_out,
1870                                       esp->msg_out_len);
1871
1872                                esp->ops->send_dma_cmd(esp,
1873                                                       esp->command_block_dma,
1874                                                       esp->msg_out_len,
1875                                                       esp->msg_out_len,
1876                                                       0,
1877                                                       ESP_CMD_DMA|ESP_CMD_TI);
1878                        }
1879                }
1880                esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1881                break;
1882        }
1883        case ESP_EVENT_MSGOUT_DONE:
1884                if (esp->rev == FASHME) {
1885                        scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1886                } else {
1887                        if (esp->msg_out_len > 1)
1888                                esp->ops->dma_invalidate(esp);
1889                }
1890
1891                if (!(esp->ireg & ESP_INTR_DC)) {
1892                        if (esp->rev != FASHME)
1893                                scsi_esp_cmd(esp, ESP_CMD_NULL);
1894                }
1895                esp_event(esp, ESP_EVENT_CHECK_PHASE);
1896                goto again;
1897        case ESP_EVENT_MSGIN:
1898                if (esp->ireg & ESP_INTR_BSERV) {
1899                        if (esp->rev == FASHME) {
1900                                if (!(esp_read8(ESP_STATUS2) &
1901                                      ESP_STAT2_FEMPTY))
1902                                        scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1903                        } else {
1904                                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1905                                if (esp->rev == ESP100)
1906                                        scsi_esp_cmd(esp, ESP_CMD_NULL);
1907                        }
1908                        scsi_esp_cmd(esp, ESP_CMD_TI);
1909                        esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1910                        return 1;
1911                }
1912                if (esp->ireg & ESP_INTR_FDONE) {
1913                        u8 val;
1914
1915                        if (esp->rev == FASHME)
1916                                val = esp->fifo[0];
1917                        else
1918                                val = esp_read8(ESP_FDATA);
1919                        esp->msg_in[esp->msg_in_len++] = val;
1920
1921                        esp_log_msgin("ESP: Got msgin byte %x\n", val);
1922
1923                        if (!esp_msgin_process(esp))
1924                                esp->msg_in_len = 0;
1925
1926                        if (esp->rev == FASHME)
1927                                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1928
1929                        scsi_esp_cmd(esp, ESP_CMD_MOK);
1930
1931                        if (esp->event != ESP_EVENT_FREE_BUS)
1932                                esp_event(esp, ESP_EVENT_CHECK_PHASE);
1933                } else {
1934                        printk("ESP: MSGIN neither BSERV nor FDONE, resetting\n");
1935                        esp_schedule_reset(esp);
1936                        return 0;
1937                }
1938                break;
1939        case ESP_EVENT_CMD_START:
1940                memcpy(esp->command_block, esp->cmd_bytes_ptr,
1941                       esp->cmd_bytes_left);
1942                if (esp->rev == FASHME)
1943                        scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1944                esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1945                                       esp->cmd_bytes_left, 16, 0,
1946                                       ESP_CMD_DMA | ESP_CMD_TI);
1947                esp_event(esp, ESP_EVENT_CMD_DONE);
1948                esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1949                break;
1950        case ESP_EVENT_CMD_DONE:
1951                esp->ops->dma_invalidate(esp);
1952                if (esp->ireg & ESP_INTR_BSERV) {
1953                        esp_event(esp, ESP_EVENT_CHECK_PHASE);
1954                        goto again;
1955                }
1956                esp_schedule_reset(esp);
1957                return 0;
1958                break;
1959
1960        case ESP_EVENT_RESET:
1961                scsi_esp_cmd(esp, ESP_CMD_RS);
1962                break;
1963
1964        default:
1965                printk("ESP: Unexpected event %x, resetting\n",
1966                       esp->event);
1967                esp_schedule_reset(esp);
1968                return 0;
1969                break;
1970        }
1971        return 1;
1972}
1973
1974static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
1975{
1976        struct scsi_cmnd *cmd = ent->cmd;
1977
1978        esp_unmap_dma(esp, cmd);
1979        esp_free_lun_tag(ent, cmd->device->hostdata);
1980        cmd->result = DID_RESET << 16;
1981
1982        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
1983                esp->ops->unmap_single(esp, ent->sense_dma,
1984                                       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
1985                ent->sense_ptr = NULL;
1986        }
1987
1988        cmd->scsi_done(cmd);
1989        list_del(&ent->list);
1990        esp_put_ent(esp, ent);
1991}
1992
1993static void esp_clear_hold(struct scsi_device *dev, void *data)
1994{
1995        struct esp_lun_data *lp = dev->hostdata;
1996
1997        BUG_ON(lp->num_tagged);
1998        lp->hold = 0;
1999}
2000
2001static void esp_reset_cleanup(struct esp *esp)
2002{
2003        struct esp_cmd_entry *ent, *tmp;
2004        int i;
2005
2006        list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
2007                struct scsi_cmnd *cmd = ent->cmd;
2008
2009                list_del(&ent->list);
2010                cmd->result = DID_RESET << 16;
2011                cmd->scsi_done(cmd);
2012                esp_put_ent(esp, ent);
2013        }
2014
2015        list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
2016                if (ent == esp->active_cmd)
2017                        esp->active_cmd = NULL;
2018                esp_reset_cleanup_one(esp, ent);
2019        }
2020
2021        BUG_ON(esp->active_cmd != NULL);
2022
2023        /* Force renegotiation of sync/wide transfers.  */
2024        for (i = 0; i < ESP_MAX_TARGET; i++) {
2025                struct esp_target_data *tp = &esp->target[i];
2026
2027                tp->esp_period = 0;
2028                tp->esp_offset = 0;
2029                tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
2030                                     ESP_CONFIG3_FSCSI |
2031                                     ESP_CONFIG3_FAST);
2032                tp->flags &= ~ESP_TGT_WIDE;
2033                tp->flags |= ESP_TGT_CHECK_NEGO;
2034
2035                if (tp->starget)
2036                        __starget_for_each_device(tp->starget, NULL,
2037                                                  esp_clear_hold);
2038        }
2039        esp->flags &= ~ESP_FLAG_RESETTING;
2040}
2041
2042/* Runs under host->lock */
2043static void __esp_interrupt(struct esp *esp)
2044{
2045        int finish_reset, intr_done;
2046        u8 phase;
2047
2048        esp->sreg = esp_read8(ESP_STATUS);
2049
2050        if (esp->flags & ESP_FLAG_RESETTING) {
2051                finish_reset = 1;
2052        } else {
2053                if (esp_check_gross_error(esp))
2054                        return;
2055
2056                finish_reset = esp_check_spur_intr(esp);
2057                if (finish_reset < 0)
2058                        return;
2059        }
2060
2061        esp->ireg = esp_read8(ESP_INTRPT);
2062
2063        if (esp->ireg & ESP_INTR_SR)
2064                finish_reset = 1;
2065
2066        if (finish_reset) {
2067                esp_reset_cleanup(esp);
2068                if (esp->eh_reset) {
2069                        complete(esp->eh_reset);
2070                        esp->eh_reset = NULL;
2071                }
2072                return;
2073        }
2074
2075        phase = (esp->sreg & ESP_STAT_PMASK);
2076        if (esp->rev == FASHME) {
2077                if (((phase != ESP_DIP && phase != ESP_DOP) &&
2078                     esp->select_state == ESP_SELECT_NONE &&
2079                     esp->event != ESP_EVENT_STATUS &&
2080                     esp->event != ESP_EVENT_DATA_DONE) ||
2081                    (esp->ireg & ESP_INTR_RSEL)) {
2082                        esp->sreg2 = esp_read8(ESP_STATUS2);
2083                        if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
2084                            (esp->sreg2 & ESP_STAT2_F1BYTE))
2085                                hme_read_fifo(esp);
2086                }
2087        }
2088
2089        esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] "
2090                     "sreg2[%02x] ireg[%02x]\n",
2091                     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);
2092
2093        intr_done = 0;
2094
2095        if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
2096                printk("ESP: unexpected IREG %02x\n", esp->ireg);
2097                if (esp->ireg & ESP_INTR_IC)
2098                        esp_dump_cmd_log(esp);
2099
2100                esp_schedule_reset(esp);
2101        } else {
2102                if (!(esp->ireg & ESP_INTR_RSEL)) {
2103                        /* Some combination of FDONE, BSERV, DC.  */
2104                        if (esp->select_state != ESP_SELECT_NONE)
2105                                intr_done = esp_finish_select(esp);
2106                } else if (esp->ireg & ESP_INTR_RSEL) {
2107                        if (esp->active_cmd)
2108                                (void) esp_finish_select(esp);
2109                        intr_done = esp_reconnect(esp);
2110                }
2111        }
2112        while (!intr_done)
2113                intr_done = esp_process_event(esp);
2114}
2115
2116irqreturn_t scsi_esp_intr(int irq, void *dev_id)
2117{
2118        struct esp *esp = dev_id;
2119        unsigned long flags;
2120        irqreturn_t ret;
2121
2122        spin_lock_irqsave(esp->host->host_lock, flags);
2123        ret = IRQ_NONE;
2124        if (esp->ops->irq_pending(esp)) {
2125                ret = IRQ_HANDLED;
2126                for (;;) {
2127                        int i;
2128
2129                        __esp_interrupt(esp);
2130                        if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
2131                                break;
2132                        esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;
2133
2134                        for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
2135                                if (esp->ops->irq_pending(esp))
2136                                        break;
2137                        }
2138                        if (i == ESP_QUICKIRQ_LIMIT)
2139                                break;
2140                }
2141        }
2142        spin_unlock_irqrestore(esp->host->host_lock, flags);
2143
2144        return ret;
2145}
2146EXPORT_SYMBOL(scsi_esp_intr);
2147
2148static void esp_get_revision(struct esp *esp)
2149{
2150        u8 val;
2151
2152        esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
2153        esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
2154        esp_write8(esp->config2, ESP_CFG2);
2155
2156        val = esp_read8(ESP_CFG2);
2157        val &= ~ESP_CONFIG2_MAGIC;
2158        if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
2159                /* If what we write to cfg2 does not come back, cfg2 is not
2160                 * implemented, therefore this must be a plain esp100.
2161                 */
2162                esp->rev = ESP100;
2163        } else {
2164                esp->config2 = 0;
2165                esp_set_all_config3(esp, 5);
2166                esp->prev_cfg3 = 5;
2167                esp_write8(esp->config2, ESP_CFG2);
2168                esp_write8(0, ESP_CFG3);
2169                esp_write8(esp->prev_cfg3, ESP_CFG3);
2170
2171                val = esp_read8(ESP_CFG3);
2172                if (val != 5) {
2173                        /* The cfg2 register is implemented, however
2174                        /* The cfg2 register is implemented but
2175                         * cfg3 is not, so this must be an esp100a.
2176                        esp->rev = ESP100A;
2177                } else {
2178                        esp_set_all_config3(esp, 0);
2179                        esp->prev_cfg3 = 0;
2180                        esp_write8(esp->prev_cfg3, ESP_CFG3);
2181
2182                        /* All of cfg{1,2,3} are implemented, so this must
2183                         * be one of the fas variants; figure out which one.
2184                         */
2185                        if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
2186                                esp->rev = FAST;
2187                                esp->sync_defp = SYNC_DEFP_FAST;
2188                        } else {
2189                                esp->rev = ESP236;
2190                        }
2191                        esp->config2 = 0;
2192                        esp_write8(esp->config2, ESP_CFG2);
2193                }
2194        }
2195}
2196
2197static void esp_init_swstate(struct esp *esp)
2198{
2199        int i;
2200
2201        INIT_LIST_HEAD(&esp->queued_cmds);
2202        INIT_LIST_HEAD(&esp->active_cmds);
2203        INIT_LIST_HEAD(&esp->esp_cmd_pool);
2204
2205        /* Start with a clear state; domain validation (via ->slave_configure,
2206         * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2207         * commands.
2208         */
2209        for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2210                esp->target[i].flags = 0;
2211                esp->target[i].nego_goal_period = 0;
2212                esp->target[i].nego_goal_offset = 0;
2213                esp->target[i].nego_goal_width = 0;
2214                esp->target[i].nego_goal_tags = 0;
2215        }
2216}
2217
2218/* This places the ESP into a known state at boot time. */
2219static void esp_bootup_reset(struct esp *esp)
2220{
2221        u8 val;
2222
2223        /* Reset the DMA */
2224        esp->ops->reset_dma(esp);
2225
2226        /* Reset the ESP */
2227        esp_reset_esp(esp);
2228
2229        /* Reset the SCSI bus, but tell ESP not to generate an irq */
2230        val = esp_read8(ESP_CFG1);
2231        val |= ESP_CONFIG1_SRRDISAB;
2232        esp_write8(val, ESP_CFG1);
2233
2234        scsi_esp_cmd(esp, ESP_CMD_RS);
2235        udelay(400);
2236
2237        esp_write8(esp->config1, ESP_CFG1);
2238
2239        /* Eat any bitrot in the chip and we are done... */
2240        esp_read8(ESP_INTRPT);
2241}
2242
2243static void esp_set_clock_params(struct esp *esp)
2244{
2245        int fhz;
2246        u8 ccf;
2247
2248        /* This is getting messy but it has to be done correctly or else
2249         * you get weird behavior all over the place.  We are basically
2250         * trying to figure out three pieces of information.
2251         *
2252         * a) Clock Conversion Factor
2253         *
2254         *    This is a representation of the input crystal clock frequency
2255         *    going into the ESP on this machine.  Any operation whose timing
2256         *    is longer than 400ns depends on this value being correct.  For
2257         *    example, you'll get blips for arbitration/selection during high
2258         *    load or with multiple targets if this is not set correctly.
2259         *
2260         * b) Selection Time-Out
2261         *
2262         *    The ESP isn't very bright and will arbitrate for the bus and try
2263         *    to select a target forever if you let it.  This value tells the
2264         *    ESP when it has taken too long to negotiate and that it should
2265         *    interrupt the CPU so we can see what happened.  The value is
2266         *    computed as follows (from NCR/Symbios chip docs).
2267         *
2268         *          (Time Out Period) *  (Input Clock)
2269         *    STO = ----------------------------------
2270         *          (8192) * (Clock Conversion Factor)
2271         *
2272         *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
2273         *
2274         * c) Empirical constants for synchronous offset and transfer period
2275         *    register values
2276         *
2277         *    This entails the smallest and largest sync period we could ever
2278         *    handle on this ESP.
2279         */
2280        fhz = esp->cfreq;
2281
2282        ccf = ((fhz / 1000000) + 4) / 5;
2283        if (ccf == 1)
2284                ccf = 2;
2285
2286        /* If we can't find anything reasonable, just assume 20MHz.
2287         * This is the clock frequency of the older sun4c's where I've
2288         * been unable to find the clock-frequency PROM property.  All
2289         * other machines seem to provide useful values.
2290         */
2291        if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
2292                fhz = 20000000;
2293                ccf = 4;
2294        }
2295
2296        esp->cfact = (ccf == 8 ? 0 : ccf);
2297        esp->cfreq = fhz;
2298        esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
2299        esp->ctick = ESP_TICK(ccf, esp->ccycle);
2300        esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
2301        esp->sync_defp = SYNC_DEFP_SLOW;
2302}
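
/* Illustrative sketch, not part of the original driver: a worked example of
 * the selection time-out formula described in the comment above, assuming a
 * 40MHz input clock, a clock conversion factor of 8, and the 250ms time-out
 * period (ESP_BUS_TIMEOUT) the comment mentions:
 *
 *     STO = (0.25 * 40000000) / (8192 * 8) = ~152.6
 *
 * Because the conversion factor roughly tracks the clock frequency (MHz / 5),
 * the result stays near 152 for any supported clock.  The helper below is
 * purely hypothetical and only demonstrates the arithmetic.
 */
static inline u32 example_sto_value(u32 fhz, u32 ccf, u32 timeout_ms)
{
        /* (time-out period * input clock) / (8192 * clock conversion factor);
         * integer arithmetic truncates to 152 for the values above.
         */
        return ((fhz / 1000) * timeout_ms) / (8192 * ccf);
}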
2303
2304static const char *esp_chip_names[] = {
2305        "ESP100",
2306        "ESP100A",
2307        "ESP236",
2308        "FAS236",
2309        "FAS100A",
2310        "FAST",
2311        "FASHME",
2312};
2313
2314static struct scsi_transport_template *esp_transport_template;
2315
2316int scsi_esp_register(struct esp *esp, struct device *dev)
2317{
2318        static int instance;
2319        int err;
2320
2321        esp->host->transportt = esp_transport_template;
2322        esp->host->max_lun = ESP_MAX_LUN;
2323        esp->host->cmd_per_lun = 2;
2324        esp->host->unique_id = instance;
2325
2326        esp_set_clock_params(esp);
2327
2328        esp_get_revision(esp);
2329
2330        esp_init_swstate(esp);
2331
2332        esp_bootup_reset(esp);
2333
2334        printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n",
2335               esp->host->unique_id, esp->regs, esp->dma_regs,
2336               esp->host->irq);
2337        printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2338               esp->host->unique_id, esp_chip_names[esp->rev],
2339               esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
2340
2341        /* Let the SCSI bus reset settle. */
2342        ssleep(esp_bus_reset_settle);
2343
2344        err = scsi_add_host(esp->host, dev);
2345        if (err)
2346                return err;
2347
2348        instance++;
2349
2350        scsi_scan_host(esp->host);
2351
2352        return 0;
2353}
2354EXPORT_SYMBOL(scsi_esp_register);
2355
2356void scsi_esp_unregister(struct esp *esp)
2357{
2358        scsi_remove_host(esp->host);
2359}
2360EXPORT_SYMBOL(scsi_esp_unregister);
2361
2362static int esp_target_alloc(struct scsi_target *starget)
2363{
2364        struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2365        struct esp_target_data *tp = &esp->target[starget->id];
2366
2367        tp->starget = starget;
2368
2369        return 0;
2370}
2371
2372static void esp_target_destroy(struct scsi_target *starget)
2373{
2374        struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2375        struct esp_target_data *tp = &esp->target[starget->id];
2376
2377        tp->starget = NULL;
2378}
2379
2380static int esp_slave_alloc(struct scsi_device *dev)
2381{
2382        struct esp *esp = shost_priv(dev->host);
2383        struct esp_target_data *tp = &esp->target[dev->id];
2384        struct esp_lun_data *lp;
2385
2386        lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2387        if (!lp)
2388                return -ENOMEM;
2389        dev->hostdata = lp;
2390
2391        spi_min_period(tp->starget) = esp->min_period;
2392        spi_max_offset(tp->starget) = 15;
2393
2394        if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2395                spi_max_width(tp->starget) = 1;
2396        else
2397                spi_max_width(tp->starget) = 0;
2398
2399        return 0;
2400}
2401
2402static int esp_slave_configure(struct scsi_device *dev)
2403{
2404        struct esp *esp = shost_priv(dev->host);
2405        struct esp_target_data *tp = &esp->target[dev->id];
2406        int goal_tags, queue_depth;
2407
2408        if (esp->flags & ESP_FLAG_DISABLE_SYNC) {
2409                /* Bypass async domain validation */
2410                dev->ppr  = 0;
2411                dev->sdtr = 0;
2412        }
2413
2414        goal_tags = 0;
2415
2416        if (dev->tagged_supported) {
2417                /* XXX make this configurable somehow XXX */
2418                goal_tags = ESP_DEFAULT_TAGS;
2419
2420                if (goal_tags > ESP_MAX_TAG)
2421                        goal_tags = ESP_MAX_TAG;
2422        }
2423
2424        queue_depth = goal_tags;
2425        if (queue_depth < dev->host->cmd_per_lun)
2426                queue_depth = dev->host->cmd_per_lun;
2427
2428        if (goal_tags) {
2429                scsi_set_tag_type(dev, MSG_ORDERED_TAG);
2430                scsi_activate_tcq(dev, queue_depth);
2431        } else {
2432                scsi_deactivate_tcq(dev, queue_depth);
2433        }
2434        tp->flags |= ESP_TGT_DISCONNECT;
2435
2436        if (!spi_initial_dv(dev->sdev_target))
2437                spi_dv_device(dev);
2438
2439        return 0;
2440}
2441
2442static void esp_slave_destroy(struct scsi_device *dev)
2443{
2444        struct esp_lun_data *lp = dev->hostdata;
2445
2446        kfree(lp);
2447        dev->hostdata = NULL;
2448}
2449
2450static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
2451{
2452        struct esp *esp = shost_priv(cmd->device->host);
2453        struct esp_cmd_entry *ent, *tmp;
2454        struct completion eh_done;
2455        unsigned long flags;
2456
2457        /* XXX This helps a lot with debugging but might be a bit
2458         * XXX much for the final driver.
2459         */
2460        spin_lock_irqsave(esp->host->host_lock, flags);
2461        printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n",
2462               esp->host->unique_id, cmd, cmd->cmnd[0]);
2463        ent = esp->active_cmd;
2464        if (ent)
2465                printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n",
2466                       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
2467        list_for_each_entry(ent, &esp->queued_cmds, list) {
2468                printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n",
2469                       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
2470        }
2471        list_for_each_entry(ent, &esp->active_cmds, list) {
2472                printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n",
2473                       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
2474        }
2475        esp_dump_cmd_log(esp);
2476        spin_unlock_irqrestore(esp->host->host_lock, flags);
2477
2478        spin_lock_irqsave(esp->host->host_lock, flags);
2479
2480        ent = NULL;
2481        list_for_each_entry(tmp, &esp->queued_cmds, list) {
2482                if (tmp->cmd == cmd) {
2483                        ent = tmp;
2484                        break;
2485                }
2486        }
2487
2488        if (ent) {
2489                /* Easiest case: we haven't even issued the command
2490                 * yet, so it is trivial to abort.
2491                 */
2492                list_del(&ent->list);
2493
2494                cmd->result = DID_ABORT << 16;
2495                cmd->scsi_done(cmd);
2496
2497                esp_put_ent(esp, ent);
2498
2499                goto out_success;
2500        }
2501
2502        init_completion(&eh_done);
2503
2504        ent = esp->active_cmd;
2505        if (ent && ent->cmd == cmd) {
2506                /* Command is the currently active command on
2507                 * the bus.  If we already have an output message
2508                 * pending, no dice.
2509                 */
2510                if (esp->msg_out_len)
2511                        goto out_failure;
2512
2513                /* Send out an abort, encouraging the target to
2514                 * go to MSGOUT phase by asserting ATN.
2515                 */
2516                esp->msg_out[0] = ABORT_TASK_SET;
2517                esp->msg_out_len = 1;
2518                ent->eh_done = &eh_done;
2519
2520                scsi_esp_cmd(esp, ESP_CMD_SATN);
2521        } else {
2522                /* The command is disconnected.  This is not easy to
2523                 * abort.  For now we fail and let the scsi error
2524                 * handling layer go try a scsi bus reset or host
2525                 * reset.
2526                 *
2527                 * What we could do is put together a scsi command
2528                 * solely for the purpose of sending an abort message
2529                 * to the target.  Coming up with all the code to
2530                 * cook up scsi commands, special case them everywhere,
2531                 * etc. is for questionable gain and it would be better
2532                 * if the generic scsi error handling layer could do at
2533                 * least some of that for us.
2534                 *
2535                 * Anyway, this is an area for potential future improvement
2536                 * in this driver.
2537                 */
2538                goto out_failure;
2539        }
2540
2541        spin_unlock_irqrestore(esp->host->host_lock, flags);
2542
2543        if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
2544                spin_lock_irqsave(esp->host->host_lock, flags);
2545                ent->eh_done = NULL;
2546                spin_unlock_irqrestore(esp->host->host_lock, flags);
2547
2548                return FAILED;
2549        }
2550
2551        return SUCCESS;
2552
2553out_success:
2554        spin_unlock_irqrestore(esp->host->host_lock, flags);
2555        return SUCCESS;
2556
2557out_failure:
2558        /* XXX This might be a good location to set ESP_TGT_BROKEN
2559         * XXX since we know which target/lun in particular is
2560         * XXX causing trouble.
2561         */
2562        spin_unlock_irqrestore(esp->host->host_lock, flags);
2563        return FAILED;
2564}
2565
2566static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
2567{
2568        struct esp *esp = shost_priv(cmd->device->host);
2569        struct completion eh_reset;
2570        unsigned long flags;
2571
2572        init_completion(&eh_reset);
2573
2574        spin_lock_irqsave(esp->host->host_lock, flags);
2575
2576        esp->eh_reset = &eh_reset;
2577
2578        /* XXX This is too simple... We should add lots of
2579         * XXX checks here so that if we find that the chip is
2580         * XXX very wedged we return failure immediately so
2581         * XXX that we can perform a full chip reset.
2582         */
2583        esp->flags |= ESP_FLAG_RESETTING;
2584        scsi_esp_cmd(esp, ESP_CMD_RS);
2585
2586        spin_unlock_irqrestore(esp->host->host_lock, flags);
2587
2588        ssleep(esp_bus_reset_settle);
2589
2590        if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
2591                spin_lock_irqsave(esp->host->host_lock, flags);
2592                esp->eh_reset = NULL;
2593                spin_unlock_irqrestore(esp->host->host_lock, flags);
2594
2595                return FAILED;
2596        }
2597
2598        return SUCCESS;
2599}
2600
2601/* All bets are off, reset the entire device.  */
2602static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
2603{
2604        struct esp *esp = shost_priv(cmd->device->host);
2605        unsigned long flags;
2606
2607        spin_lock_irqsave(esp->host->host_lock, flags);
2608        esp_bootup_reset(esp);
2609        esp_reset_cleanup(esp);
2610        spin_unlock_irqrestore(esp->host->host_lock, flags);
2611
2612        ssleep(esp_bus_reset_settle);
2613
2614        return SUCCESS;
2615}
2616
2617static const char *esp_info(struct Scsi_Host *host)
2618{
2619        return "esp";
2620}
2621
2622struct scsi_host_template scsi_esp_template = {
2623        .module                 = THIS_MODULE,
2624        .name                   = "esp",
2625        .info                   = esp_info,
2626        .queuecommand           = esp_queuecommand,
2627        .target_alloc           = esp_target_alloc,
2628        .target_destroy         = esp_target_destroy,
2629        .slave_alloc            = esp_slave_alloc,
2630        .slave_configure        = esp_slave_configure,
2631        .slave_destroy          = esp_slave_destroy,
2632        .eh_abort_handler       = esp_eh_abort_handler,
2633        .eh_bus_reset_handler   = esp_eh_bus_reset_handler,
2634        .eh_host_reset_handler  = esp_eh_host_reset_handler,
2635        .can_queue              = 7,
2636        .this_id                = 7,
2637        .sg_tablesize           = SG_ALL,
2638        .use_clustering         = ENABLE_CLUSTERING,
2639        .max_sectors            = 0xffff,
2640        .skip_settle_delay      = 1,
2641};
2642EXPORT_SYMBOL(scsi_esp_template);
2643
2644static void esp_get_signalling(struct Scsi_Host *host)
2645{
2646        struct esp *esp = shost_priv(host);
2647        enum spi_signal_type type;
2648
2649        if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2650                type = SPI_SIGNAL_HVD;
2651        else
2652                type = SPI_SIGNAL_SE;
2653
2654        spi_signalling(host) = type;
2655}
2656
2657static void esp_set_offset(struct scsi_target *target, int offset)
2658{
2659        struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2660        struct esp *esp = shost_priv(host);
2661        struct esp_target_data *tp = &esp->target[target->id];
2662
2663        tp->nego_goal_offset = offset;
2664        tp->flags |= ESP_TGT_CHECK_NEGO;
2665}
2666
2667static void esp_set_period(struct scsi_target *target, int period)
2668{
2669        struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2670        struct esp *esp = shost_priv(host);
2671        struct esp_target_data *tp = &esp->target[target->id];
2672
2673        tp->nego_goal_period = period;
2674        tp->flags |= ESP_TGT_CHECK_NEGO;
2675}
2676
2677static void esp_set_width(struct scsi_target *target, int width)
2678{
2679        struct Scsi_Host *host = dev_to_shost(target->dev.parent);
2680        struct esp *esp = shost_priv(host);
2681        struct esp_target_data *tp = &esp->target[target->id];
2682
2683        tp->nego_goal_width = (width ? 1 : 0);
2684        tp->flags |= ESP_TGT_CHECK_NEGO;
2685}
2686
2687static struct spi_function_template esp_transport_ops = {
2688        .set_offset             = esp_set_offset,
2689        .show_offset            = 1,
2690        .set_period             = esp_set_period,
2691        .show_period            = 1,
2692        .set_width              = esp_set_width,
2693        .show_width             = 1,
2694        .get_signalling         = esp_get_signalling,
2695};
2696
2697static int __init esp_init(void)
2698{
2699        BUILD_BUG_ON(sizeof(struct scsi_pointer) <
2700                     sizeof(struct esp_cmd_priv));
2701
2702        esp_transport_template = spi_attach_transport(&esp_transport_ops);
2703        if (!esp_transport_template)
2704                return -ENODEV;
2705
2706        return 0;
2707}
2708
2709static void __exit esp_exit(void)
2710{
2711        spi_release_transport(esp_transport_template);
2712}
2713
2714MODULE_DESCRIPTION("ESP SCSI driver core");
2715MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
2716MODULE_LICENSE("GPL");
2717MODULE_VERSION(DRV_VERSION);
2718
2719module_param(esp_bus_reset_settle, int, 0);
2720MODULE_PARM_DESC(esp_bus_reset_settle,
2721                 "ESP scsi bus reset delay in seconds");
2722
2723module_param(esp_debug, int, 0);
2724MODULE_PARM_DESC(esp_debug,
2725"ESP bitmapped debugging message enable value:\n"
2726"       0x00000001      Log interrupt events\n"
2727"       0x00000002      Log scsi commands\n"
2728"       0x00000004      Log resets\n"
2729"       0x00000008      Log message in events\n"
2730"       0x00000010      Log message out events\n"
2731"       0x00000020      Log command completion\n"
2732"       0x00000040      Log disconnects\n"
2733"       0x00000080      Log data start\n"
2734"       0x00000100      Log data done\n"
2735"       0x00000200      Log reconnects\n"
2736"       0x00000400      Log auto-sense data\n"
2737);
2738
2739module_init(esp_init);
2740module_exit(esp_exit);
2741