/* qemu/hw/scsi/esp.c */
/*
 * QEMU ESP/NCR53C9x emulation
 *
 * Copyright (c) 2005-2006 Fabrice Bellard
 * Copyright (c) 2012 Herve Poussineau
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
  25
#include "qemu/osdep.h"
#include "hw/sysbus.h"
#include "migration/vmstate.h"
#include "hw/irq.h"
#include "hw/scsi/esp.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/module.h"
  34
/*
 * On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
 * also produced as NCR89C100. See
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR89C100.txt
 * and
 * http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
 *
 * On Macintosh Quadra it is a NCR53C96.
 */
  44
  45static void esp_raise_irq(ESPState *s)
  46{
  47    if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
  48        s->rregs[ESP_RSTAT] |= STAT_INT;
  49        qemu_irq_raise(s->irq);
  50        trace_esp_raise_irq();
  51    }
  52}
  53
  54static void esp_lower_irq(ESPState *s)
  55{
  56    if (s->rregs[ESP_RSTAT] & STAT_INT) {
  57        s->rregs[ESP_RSTAT] &= ~STAT_INT;
  58        qemu_irq_lower(s->irq);
  59        trace_esp_lower_irq();
  60    }
  61}
  62
/* Assert the DREQ line used to signal pseudo-DMA data availability */
static void esp_raise_drq(ESPState *s)
{
    qemu_irq_raise(s->irq_data);
}
  67
/* Deassert the DREQ line once a pseudo-DMA transfer has been consumed */
static void esp_lower_drq(ESPState *s)
{
    qemu_irq_lower(s->irq_data);
}
  72
  73void esp_dma_enable(ESPState *s, int irq, int level)
  74{
  75    if (level) {
  76        s->dma_enabled = 1;
  77        trace_esp_dma_enable();
  78        if (s->dma_cb) {
  79            s->dma_cb(s);
  80            s->dma_cb = NULL;
  81        }
  82    } else {
  83        trace_esp_dma_disable();
  84        s->dma_enabled = 0;
  85    }
  86}
  87
  88void esp_request_cancelled(SCSIRequest *req)
  89{
  90    ESPState *s = req->hba_private;
  91
  92    if (req == s->current_req) {
  93        scsi_req_unref(s->current_req);
  94        s->current_req = NULL;
  95        s->current_dev = NULL;
  96    }
  97}
  98
/*
 * Arm a pseudo-DMA transfer: record which buffer the CPU-driven PDMA
 * accesses should target (origin), the starting offset (index) and the
 * number of bytes expected (len).  pdma_cur advances as bytes move.
 */
static void set_pdma(ESPState *s, enum pdma_origin_id origin,
                     uint32_t index, uint32_t len)
{
    s->pdma_origin = origin;
    s->pdma_start = index;
    s->pdma_cur = index;
    s->pdma_len = len;
}
 107
 108static uint8_t *get_pdma_buf(ESPState *s)
 109{
 110    switch (s->pdma_origin) {
 111    case PDMA:
 112        return s->pdma_buf;
 113    case TI:
 114        return s->ti_buf;
 115    case CMD:
 116        return s->cmdbuf;
 117    case ASYNC:
 118        return s->async_buf;
 119    }
 120    return NULL;
 121}
 122
/*
 * Common selection setup: reset the transfer buffer, cancel any command
 * still in flight, and look up the selected target device.
 *
 * Returns 0 on success, -1 (after raising a disconnect interrupt) if no
 * device answers at the selected target id.
 */
static int get_cmd_cb(ESPState *s)
{
    int target;

    /* Target id comes from the bus ID register written by the guest */
    target = s->wregs[ESP_WBUSID] & BUSID_DID;

    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;

    if (s->current_req) {
        /* Started a new command before the old one finished.  Cancel it.  */
        scsi_req_cancel(s->current_req);
        s->async_len = 0;
    }

    s->current_dev = scsi_device_find(&s->bus, 0, target, 0);
    if (!s->current_dev) {
        /* No such drive */
        s->rregs[ESP_RSTAT] = 0;
        s->rregs[ESP_RINTR] = INTR_DC;
        s->rregs[ESP_RSEQ] = SEQ_0;
        esp_raise_irq(s);
        return -1;
    }
    return 0;
}
 150
/*
 * Fetch the message + CDB bytes for a selection command into @buf.
 *
 * Returns the number of bytes fetched, or 0 when: the transfer length
 * exceeds @buflen, the transfer was handed off to pseudo-DMA (completed
 * later via s->pdma_cb), or target lookup failed.
 */
static uint32_t get_cmd(ESPState *s, uint8_t *buf, uint8_t buflen)
{
    uint32_t dmalen;
    int target;

    target = s->wregs[ESP_WBUSID] & BUSID_DID;
    if (s->dma) {
        /* DMA transfer: length comes from the 24-bit transfer counter */
        dmalen = s->rregs[ESP_TCLO];
        dmalen |= s->rregs[ESP_TCMID] << 8;
        dmalen |= s->rregs[ESP_TCHI] << 16;
        if (dmalen > buflen) {
            return 0;
        }
        if (s->dma_memory_read) {
            s->dma_memory_read(s->dma_opaque, buf, dmalen);
        } else {
            /*
             * NOTE(review): this copies from the (caller-uninitialized)
             * @buf into pdma_buf before the CPU supplies the bytes via
             * the PDMA port -- the copy looks redundant or reversed;
             * confirm intent before changing.
             */
            memcpy(s->pdma_buf, buf, dmalen);
            set_pdma(s, PDMA, 0, dmalen);
            esp_raise_drq(s);
            return 0;
        }
    } else {
        /* PIO transfer: bytes were accumulated in the FIFO buffer */
        dmalen = s->ti_size;
        if (dmalen > TI_BUFSZ) {
            return 0;
        }
        memcpy(buf, s->ti_buf, dmalen);
        buf[0] = buf[2] >> 5;
    }
    trace_esp_get_cmd(dmalen, target);

    if (get_cmd_cb(s) < 0) {
        return 0;
    }
    return dmalen;
}
 187
/*
 * Enqueue the CDB in @buf for the LUN encoded in @busid (low 3 bits),
 * start the SCSI request, and raise the function-complete interrupt.
 * ti_size is set to the expected transfer length: positive for data-in
 * (device to initiator), negative for data-out.
 */
static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
{
    int32_t datalen;
    int lun;
    SCSIDevice *current_lun;

    trace_esp_do_busid_cmd(busid);
    lun = busid & 7;
    current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
    s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
    datalen = scsi_req_enqueue(s->current_req);
    s->ti_size = datalen;
    if (datalen != 0) {
        s->rregs[ESP_RSTAT] = STAT_TC;
        s->dma_left = 0;
        s->dma_counter = 0;
        if (datalen > 0) {
            /* Device will send data to the initiator */
            s->rregs[ESP_RSTAT] |= STAT_DI;
        } else {
            /* Initiator will send data to the device */
            s->rregs[ESP_RSTAT] |= STAT_DO;
        }
        scsi_req_continue(s->current_req);
    }
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
 215
 216static void do_cmd(ESPState *s, uint8_t *buf)
 217{
 218    uint8_t busid = buf[0];
 219
 220    do_busid_cmd(s, &buf[1], busid);
 221}
 222
/*
 * Pseudo-DMA completion for SELATN: once the CPU has pushed the command
 * bytes, finish target setup and dispatch the command if any bytes arrived.
 */
static void satn_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    /* pdma_cur advanced past pdma_start iff at least one byte was written */
    if (s->pdma_cur != s->pdma_start) {
        do_cmd(s, get_pdma_buf(s) + s->pdma_start);
    }
}
 232
 233static void handle_satn(ESPState *s)
 234{
 235    uint8_t buf[32];
 236    int len;
 237
 238    if (s->dma && !s->dma_enabled) {
 239        s->dma_cb = handle_satn;
 240        return;
 241    }
 242    s->pdma_cb = satn_pdma_cb;
 243    len = get_cmd(s, buf, sizeof(buf));
 244    if (len)
 245        do_cmd(s, buf);
 246}
 247
/*
 * Pseudo-DMA completion for SEL (no ATN): the transferred bytes are a
 * bare CDB with no message byte, so dispatch with busid 0 (LUN 0).
 */
static void s_without_satn_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    /* Only dispatch if at least one byte was actually transferred */
    if (s->pdma_cur != s->pdma_start) {
        do_busid_cmd(s, get_pdma_buf(s) + s->pdma_start, 0);
    }
}
 257
 258static void handle_s_without_atn(ESPState *s)
 259{
 260    uint8_t buf[32];
 261    int len;
 262
 263    if (s->dma && !s->dma_enabled) {
 264        s->dma_cb = handle_s_without_atn;
 265        return;
 266    }
 267    s->pdma_cb = s_without_satn_pdma_cb;
 268    len = get_cmd(s, buf, sizeof(buf));
 269    if (len) {
 270        do_busid_cmd(s, buf, 0);
 271    }
 272}
 273
/*
 * Pseudo-DMA completion for SELATNS (select with ATN, stop): command
 * bytes were accumulated into cmdbuf; mark the command pending (do_cmd)
 * and interrupt so the guest issues a Transfer Information command.
 */
static void satn_stop_pdma_cb(ESPState *s)
{
    if (get_cmd_cb(s) < 0) {
        return;
    }
    s->cmdlen = s->pdma_cur - s->pdma_start;
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
 289
/*
 * Handle the SELATNS command: select with ATN but stop after the message
 * byte(s), leaving the command to be completed by a later Transfer
 * Information command (do_cmd flag).  Deferred when DMA is gated off.
 */
static void handle_satn_stop(ESPState *s)
{
    if (s->dma && !s->dma_enabled) {
        s->dma_cb = handle_satn_stop;
        return;
    }
    s->pdma_cb = satn_stop_pdma_cb;
    s->cmdlen = get_cmd(s, s->cmdbuf, sizeof(s->cmdbuf));
    if (s->cmdlen) {
        trace_esp_handle_satn_stop(s->cmdlen);
        s->do_cmd = 1;
        s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD;
        s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_raise_irq(s);
    }
}
 307
/*
 * Pseudo-DMA completion for write_response(): the guest has read the
 * status/message pair, so report transfer complete and interrupt.
 */
static void write_response_pdma_cb(ESPState *s)
{
    s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
    s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
    s->rregs[ESP_RSEQ] = SEQ_CD;
    esp_raise_irq(s);
}
 315
/*
 * Deliver the status byte and a zero message byte to the initiator,
 * either by DMA, pseudo-DMA, or by leaving them in the FIFO for PIO.
 */
static void write_response(ESPState *s)
{
    trace_esp_write_response(s->status);
    s->ti_buf[0] = s->status;
    s->ti_buf[1] = 0;   /* COMMAND COMPLETE message byte */
    if (s->dma) {
        if (s->dma_memory_write) {
            s->dma_memory_write(s->dma_opaque, s->ti_buf, 2);
            s->rregs[ESP_RSTAT] = STAT_TC | STAT_ST;
            s->rregs[ESP_RINTR] = INTR_BS | INTR_FC;
            s->rregs[ESP_RSEQ] = SEQ_CD;
        } else {
            /* No DMA backend: let the CPU pull the two bytes via PDMA */
            set_pdma(s, TI, 0, 2);
            s->pdma_cb = write_response_pdma_cb;
            esp_raise_drq(s);
            return;
        }
    } else {
        /* PIO: stage the two bytes in the FIFO for the guest to read */
        s->ti_size = 2;
        s->ti_rptr = 0;
        s->ti_wptr = 2;
        s->rregs[ESP_RFLAGS] = 2;
    }
    esp_raise_irq(s);
}
 341
/*
 * Signal completion of a DMA transfer: set terminal count, clear the
 * transfer counter registers, and raise the bus-service interrupt.
 */
static void esp_dma_done(ESPState *s)
{
    s->rregs[ESP_RSTAT] |= STAT_TC;
    s->rregs[ESP_RINTR] = INTR_BS;
    s->rregs[ESP_RSEQ] = 0;
    s->rregs[ESP_RFLAGS] = 0;
    s->rregs[ESP_TCLO] = 0;
    s->rregs[ESP_TCMID] = 0;
    s->rregs[ESP_TCHI] = 0;
    esp_raise_irq(s);
}
 353
/*
 * Pseudo-DMA completion for data transfers set up by esp_do_dma():
 * account for the bytes the CPU moved through the PDMA port, hand the
 * buffer back to the SCSI layer, and finish the DMA when appropriate.
 */
static void do_dma_pdma_cb(ESPState *s)
{
    /* Negative ti_size encodes an initiator-to-device (write) transfer */
    int to_device = (s->ti_size < 0);
    int len = s->pdma_cur - s->pdma_start;
    if (s->do_cmd) {
        /* The bytes were command bytes: dispatch the accumulated CDB */
        s->ti_size = 0;
        s->cmdlen = 0;
        s->do_cmd = 0;
        do_cmd(s, s->cmdbuf);
        return;
    }
    s->dma_left -= len;
    s->async_buf += len;
    s->async_len -= len;
    if (to_device) {
        s->ti_size += len;
    } else {
        s->ti_size -= len;
    }
    if (s->async_len == 0) {
        scsi_req_continue(s->current_req);
        /*
         * If there is still data to be read from the device then
         * complete the DMA operation immediately.  Otherwise defer
         * until the scsi layer has completed.
         */
        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
            return;
        }
    }

    /* Partially filled a scsi buffer. Complete immediately.  */
    esp_dma_done(s);
}
 388
 389static void esp_do_dma(ESPState *s)
 390{
 391    uint32_t len;
 392    int to_device;
 393
 394    len = s->dma_left;
 395    if (s->do_cmd) {
 396        /*
 397         * handle_ti_cmd() case: esp_do_dma() is called only from
 398         * handle_ti_cmd() with do_cmd != NULL (see the assert())
 399         */
 400        trace_esp_do_dma(s->cmdlen, len);
 401        assert (s->cmdlen <= sizeof(s->cmdbuf) &&
 402                len <= sizeof(s->cmdbuf) - s->cmdlen);
 403        if (s->dma_memory_read) {
 404            s->dma_memory_read(s->dma_opaque, &s->cmdbuf[s->cmdlen], len);
 405        } else {
 406            set_pdma(s, CMD, s->cmdlen, len);
 407            s->pdma_cb = do_dma_pdma_cb;
 408            esp_raise_drq(s);
 409            return;
 410        }
 411        trace_esp_handle_ti_cmd(s->cmdlen);
 412        s->ti_size = 0;
 413        s->cmdlen = 0;
 414        s->do_cmd = 0;
 415        do_cmd(s, s->cmdbuf);
 416        return;
 417    }
 418    if (s->async_len == 0) {
 419        /* Defer until data is available.  */
 420        return;
 421    }
 422    if (len > s->async_len) {
 423        len = s->async_len;
 424    }
 425    to_device = (s->ti_size < 0);
 426    if (to_device) {
 427        if (s->dma_memory_read) {
 428            s->dma_memory_read(s->dma_opaque, s->async_buf, len);
 429        } else {
 430            set_pdma(s, ASYNC, 0, len);
 431            s->pdma_cb = do_dma_pdma_cb;
 432            esp_raise_drq(s);
 433            return;
 434        }
 435    } else {
 436        if (s->dma_memory_write) {
 437            s->dma_memory_write(s->dma_opaque, s->async_buf, len);
 438        } else {
 439            set_pdma(s, ASYNC, 0, len);
 440            s->pdma_cb = do_dma_pdma_cb;
 441            esp_raise_drq(s);
 442            return;
 443        }
 444    }
 445    s->dma_left -= len;
 446    s->async_buf += len;
 447    s->async_len -= len;
 448    if (to_device)
 449        s->ti_size += len;
 450    else
 451        s->ti_size -= len;
 452    if (s->async_len == 0) {
 453        scsi_req_continue(s->current_req);
 454        /* If there is still data to be read from the device then
 455           complete the DMA operation immediately.  Otherwise defer
 456           until the scsi layer has completed.  */
 457        if (to_device || s->dma_left != 0 || s->ti_size == 0) {
 458            return;
 459        }
 460    }
 461
 462    /* Partially filled a scsi buffer. Complete immediately.  */
 463    esp_dma_done(s);
 464}
 465
/*
 * Report completion of the current SCSI request to the guest: latch the
 * status byte, enter the status phase, raise the completion interrupt,
 * and release our reference to the request.
 */
static void esp_report_command_complete(ESPState *s, uint32_t status)
{
    trace_esp_command_complete();
    if (s->ti_size != 0) {
        /* Command finished with bytes still outstanding */
        trace_esp_command_complete_unexpected();
    }
    s->ti_size = 0;
    s->dma_left = 0;
    s->async_len = 0;
    if (status) {
        trace_esp_command_complete_fail();
    }
    s->status = status;
    s->rregs[ESP_RSTAT] = STAT_ST;
    esp_dma_done(s);
    if (s->current_req) {
        scsi_req_unref(s->current_req);
        s->current_req = NULL;
        s->current_dev = NULL;
    }
}
 487
/*
 * SCSI bus completion callback.  @resid is unused: the ESP model tracks
 * residual bytes itself via ti_size/dma_left.
 */
void esp_command_complete(SCSIRequest *req, uint32_t status,
                          size_t resid)
{
    ESPState *s = req->hba_private;

    if (s->rregs[ESP_RSTAT] & STAT_INT) {
        /* Defer handling command complete until the previous
         * interrupt has been handled.
         */
        trace_esp_command_complete_deferred();
        s->deferred_status = status;
        s->deferred_complete = true;
        return;
    }
    esp_report_command_complete(s, status);
}
 504
/*
 * SCSI bus data callback: the layer has @len bytes ready (or wants @len
 * bytes) in the request's buffer.  Resume any in-progress DMA, or raise
 * the deferred completion interrupt for the final chunk.
 */
void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
    ESPState *s = req->hba_private;

    assert(!s->do_cmd);
    trace_esp_transfer_data(s->dma_left, s->ti_size);
    s->async_len = len;
    s->async_buf = scsi_req_get_buf(req);
    if (s->dma_left) {
        esp_do_dma(s);
    } else if (s->dma_counter != 0 && s->ti_size <= 0) {
        /* If this was the last part of a DMA transfer then the
           completion interrupt is deferred to here.  */
        esp_dma_done(s);
    }
}
 521
 522static void handle_ti(ESPState *s)
 523{
 524    uint32_t dmalen, minlen;
 525
 526    if (s->dma && !s->dma_enabled) {
 527        s->dma_cb = handle_ti;
 528        return;
 529    }
 530
 531    dmalen = s->rregs[ESP_TCLO];
 532    dmalen |= s->rregs[ESP_TCMID] << 8;
 533    dmalen |= s->rregs[ESP_TCHI] << 16;
 534    if (dmalen==0) {
 535      dmalen=0x10000;
 536    }
 537    s->dma_counter = dmalen;
 538
 539    if (s->do_cmd)
 540        minlen = (dmalen < ESP_CMDBUF_SZ) ? dmalen : ESP_CMDBUF_SZ;
 541    else if (s->ti_size < 0)
 542        minlen = (dmalen < -s->ti_size) ? dmalen : -s->ti_size;
 543    else
 544        minlen = (dmalen < s->ti_size) ? dmalen : s->ti_size;
 545    trace_esp_handle_ti(minlen);
 546    if (s->dma) {
 547        s->dma_left = minlen;
 548        s->rregs[ESP_RSTAT] &= ~STAT_TC;
 549        esp_do_dma(s);
 550    } else if (s->do_cmd) {
 551        trace_esp_handle_ti_cmd(s->cmdlen);
 552        s->ti_size = 0;
 553        s->cmdlen = 0;
 554        s->do_cmd = 0;
 555        do_cmd(s, s->cmdbuf);
 556    }
 557}
 558
/* Reset all chip state to power-on defaults (CFG1 retains the chip id 7) */
void esp_hard_reset(ESPState *s)
{
    memset(s->rregs, 0, ESP_REGS);
    memset(s->wregs, 0, ESP_REGS);
    s->tchi_written = 0;
    s->ti_size = 0;
    s->ti_rptr = 0;
    s->ti_wptr = 0;
    s->dma = 0;
    s->do_cmd = 0;
    s->dma_cb = NULL;

    s->rregs[ESP_CFG1] = 7;
}
 573
/* Chip-initiated reset: drop both output lines, then reset all state */
static void esp_soft_reset(ESPState *s)
{
    qemu_irq_lower(s->irq);
    qemu_irq_lower(s->irq_data);
    esp_hard_reset(s);
}
 580
 581static void parent_esp_reset(ESPState *s, int irq, int level)
 582{
 583    if (level) {
 584        esp_soft_reset(s);
 585    }
 586}
 587
/*
 * Read an ESP register.  Reading ESP_RINTR acknowledges and clears the
 * interrupt condition; reading ESP_FIFO pops a byte from the buffer.
 */
uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
    uint32_t old_val;

    trace_esp_mem_readb(saddr, s->rregs[saddr]);
    switch (saddr) {
    case ESP_FIFO:
        if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
            /* Data out.  */
            qemu_log_mask(LOG_UNIMP, "esp: PIO data read not implemented\n");
            s->rregs[ESP_FIFO] = 0;
        } else if (s->ti_rptr < s->ti_wptr) {
            /* Pop the next buffered byte into the FIFO register */
            s->ti_size--;
            s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
        }
        if (s->ti_rptr == s->ti_wptr) {
            /* Buffer fully drained: rewind both pointers */
            s->ti_rptr = 0;
            s->ti_wptr = 0;
        }
        break;
    case ESP_RINTR:
        /* Clear sequence step, interrupt register and all status bits
           except TC */
        old_val = s->rregs[ESP_RINTR];
        s->rregs[ESP_RINTR] = 0;
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        s->rregs[ESP_RSEQ] = SEQ_CD;
        esp_lower_irq(s);
        if (s->deferred_complete) {
            /* A completion was held back while INT was pending; deliver
               it now that the guest has acknowledged the interrupt */
            esp_report_command_complete(s, s->deferred_status);
            s->deferred_complete = false;
        }
        return old_val;
    case ESP_TCHI:
        /* Return the unique id if the value has never been written */
        if (!s->tchi_written) {
            return s->chip_id;
        }
        /* fall through */
    default:
        break;
    }
    return s->rregs[saddr];
}
 631
/*
 * Write an ESP register.  Writes to ESP_CMD start command execution;
 * all accepted writes are also latched into wregs[] at the end.
 */
void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
    trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
    switch (saddr) {
    case ESP_TCHI:
        s->tchi_written = true;
        /* fall through */
    case ESP_TCLO:
    case ESP_TCMID:
        /* Writing the transfer counter clears terminal-count status */
        s->rregs[ESP_RSTAT] &= ~STAT_TC;
        break;
    case ESP_FIFO:
        if (s->do_cmd) {
            /* Command phase: accumulate CDB bytes for a pending command */
            if (s->cmdlen < ESP_CMDBUF_SZ) {
                s->cmdbuf[s->cmdlen++] = val & 0xff;
            } else {
                trace_esp_error_fifo_overrun();
            }
        } else if (s->ti_wptr == TI_BUFSZ - 1) {
            trace_esp_error_fifo_overrun();
        } else {
            s->ti_size++;
            s->ti_buf[s->ti_wptr++] = val & 0xff;
        }
        break;
    case ESP_CMD:
        s->rregs[saddr] = val;
        if (val & CMD_DMA) {
            s->dma = 1;
            /* Reload DMA counter.  */
            s->rregs[ESP_TCLO] = s->wregs[ESP_TCLO];
            s->rregs[ESP_TCMID] = s->wregs[ESP_TCMID];
            s->rregs[ESP_TCHI] = s->wregs[ESP_TCHI];
        } else {
            s->dma = 0;
        }
        switch(val & CMD_CMD) {
        case CMD_NOP:
            trace_esp_mem_writeb_cmd_nop(val);
            break;
        case CMD_FLUSH:
            trace_esp_mem_writeb_cmd_flush(val);
            /* NOTE(review): ti_size is left untouched here (a clear was
               once present but disabled); confirm FLUSH should only
               reset INTR/SEQ/FLAGS */
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            break;
        case CMD_RESET:
            trace_esp_mem_writeb_cmd_reset(val);
            esp_soft_reset(s);
            break;
        case CMD_BUSRESET:
            trace_esp_mem_writeb_cmd_bus_reset(val);
            s->rregs[ESP_RINTR] = INTR_RST;
            /* CFG1_RESREPT suppresses the reset-report interrupt */
            if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) {
                esp_raise_irq(s);
            }
            break;
        case CMD_TI:
            handle_ti(s);
            break;
        case CMD_ICCS:
            trace_esp_mem_writeb_cmd_iccs(val);
            /* Initiator Command Complete Sequence: send status+message */
            write_response(s);
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSTAT] |= STAT_MI;
            break;
        case CMD_MSGACC:
            trace_esp_mem_writeb_cmd_msgacc(val);
            s->rregs[ESP_RINTR] = INTR_DC;
            s->rregs[ESP_RSEQ] = 0;
            s->rregs[ESP_RFLAGS] = 0;
            esp_raise_irq(s);
            break;
        case CMD_PAD:
            trace_esp_mem_writeb_cmd_pad(val);
            s->rregs[ESP_RSTAT] = STAT_TC;
            s->rregs[ESP_RINTR] = INTR_FC;
            s->rregs[ESP_RSEQ] = 0;
            break;
        case CMD_SATN:
            trace_esp_mem_writeb_cmd_satn(val);
            break;
        case CMD_RSTATN:
            trace_esp_mem_writeb_cmd_rstatn(val);
            break;
        case CMD_SEL:
            trace_esp_mem_writeb_cmd_sel(val);
            handle_s_without_atn(s);
            break;
        case CMD_SELATN:
            trace_esp_mem_writeb_cmd_selatn(val);
            handle_satn(s);
            break;
        case CMD_SELATNS:
            trace_esp_mem_writeb_cmd_selatns(val);
            handle_satn_stop(s);
            break;
        case CMD_ENSEL:
            trace_esp_mem_writeb_cmd_ensel(val);
            s->rregs[ESP_RINTR] = 0;
            break;
        case CMD_DISSEL:
            trace_esp_mem_writeb_cmd_dissel(val);
            s->rregs[ESP_RINTR] = 0;
            esp_raise_irq(s);
            break;
        default:
            trace_esp_error_unhandled_command(val);
            break;
        }
        break;
    case ESP_WBUSID ... ESP_WSYNO:
        break;
    case ESP_CFG1:
    case ESP_CFG2: case ESP_CFG3:
    case ESP_RES3: case ESP_RES4:
        /* These configuration registers read back what was written */
        s->rregs[saddr] = val;
        break;
    case ESP_WCCF ... ESP_WTEST:
        break;
    default:
        trace_esp_error_invalid_write(val, saddr);
        return;
    }
    s->wregs[saddr] = val;
}
 759
 760static bool esp_mem_accepts(void *opaque, hwaddr addr,
 761                            unsigned size, bool is_write,
 762                            MemTxAttrs attrs)
 763{
 764    return (size == 1) || (is_write && size == 4);
 765}
 766
 767static bool esp_pdma_needed(void *opaque)
 768{
 769    ESPState *s = opaque;
 770    return s->dma_memory_read == NULL && s->dma_memory_write == NULL &&
 771           s->dma_enabled;
 772}
 773
/* Migration subsection for in-flight pseudo-DMA state */
static const VMStateDescription vmstate_esp_pdma = {
    .name = "esp/pdma",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = esp_pdma_needed,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(pdma_buf, ESPState),
        VMSTATE_INT32(pdma_origin, ESPState),
        VMSTATE_UINT32(pdma_len, ESPState),
        VMSTATE_UINT32(pdma_start, ESPState),
        VMSTATE_UINT32(pdma_cur, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
 788
/* Core ESP migration state; version 4 extended cmdbuf from 16 bytes */
const VMStateDescription vmstate_esp = {
    .name ="esp",
    .version_id = 4,
    .minimum_version_id = 3,
    .fields = (VMStateField[]) {
        VMSTATE_BUFFER(rregs, ESPState),
        VMSTATE_BUFFER(wregs, ESPState),
        VMSTATE_INT32(ti_size, ESPState),
        VMSTATE_UINT32(ti_rptr, ESPState),
        VMSTATE_UINT32(ti_wptr, ESPState),
        VMSTATE_BUFFER(ti_buf, ESPState),
        VMSTATE_UINT32(status, ESPState),
        VMSTATE_UINT32(deferred_status, ESPState),
        VMSTATE_BOOL(deferred_complete, ESPState),
        VMSTATE_UINT32(dma, ESPState),
        /* First 16 bytes for v3 compatibility, remainder from v4 on */
        VMSTATE_PARTIAL_BUFFER(cmdbuf, ESPState, 16),
        VMSTATE_BUFFER_START_MIDDLE_V(cmdbuf, ESPState, 16, 4),
        VMSTATE_UINT32(cmdlen, ESPState),
        VMSTATE_UINT32(do_cmd, ESPState),
        VMSTATE_UINT32(dma_left, ESPState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_esp_pdma,
        NULL
    }
};
 816
 817static void sysbus_esp_mem_write(void *opaque, hwaddr addr,
 818                                 uint64_t val, unsigned int size)
 819{
 820    SysBusESPState *sysbus = opaque;
 821    uint32_t saddr;
 822
 823    saddr = addr >> sysbus->it_shift;
 824    esp_reg_write(&sysbus->esp, saddr, val);
 825}
 826
 827static uint64_t sysbus_esp_mem_read(void *opaque, hwaddr addr,
 828                                    unsigned int size)
 829{
 830    SysBusESPState *sysbus = opaque;
 831    uint32_t saddr;
 832
 833    saddr = addr >> sysbus->it_shift;
 834    return esp_reg_read(&sysbus->esp, saddr);
 835}
 836
/* Register-bank MMIO ops; access sizes filtered by esp_mem_accepts() */
static const MemoryRegionOps sysbus_esp_mem_ops = {
    .read = sysbus_esp_mem_read,
    .write = sysbus_esp_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.accepts = esp_mem_accepts,
};
 843
/*
 * Pseudo-DMA port write: the CPU pushes 1 or 2 data bytes which are
 * stored into the armed PDMA buffer while the transfer counter counts
 * down.  When pdma_len hits zero the completion callback fires.
 *
 * NOTE(review): pdma_cur is not bounds-checked against the size of the
 * buffer returned by get_pdma_buf(); confirm a guest cannot overrun it
 * with a large transfer counter.
 */
static void sysbus_esp_pdma_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = &sysbus->esp;
    uint32_t dmalen;
    uint8_t *buf = get_pdma_buf(s);

    dmalen = s->rregs[ESP_TCLO];
    dmalen |= s->rregs[ESP_TCMID] << 8;
    dmalen |= s->rregs[ESP_TCHI] << 16;
    if (dmalen == 0 || s->pdma_len == 0) {
        return;
    }
    switch (size) {
    case 1:
        buf[s->pdma_cur++] = val;
        s->pdma_len--;
        dmalen--;
        break;
    case 2:
        /* 16-bit access: high byte first (big-endian order) */
        buf[s->pdma_cur++] = val >> 8;
        buf[s->pdma_cur++] = val;
        s->pdma_len -= 2;
        dmalen -= 2;
        break;
    }
    /* Write the decremented transfer counter back to the registers */
    s->rregs[ESP_TCLO] = dmalen & 0xff;
    s->rregs[ESP_TCMID] = dmalen >> 8;
    s->rregs[ESP_TCHI] = dmalen >> 16;
    if (s->pdma_len == 0 && s->pdma_cb) {
        esp_lower_drq(s);
        s->pdma_cb(s);
        s->pdma_cb = NULL;
    }
}
 880
/*
 * Pseudo-DMA port read: the CPU pulls 1 or 2 data bytes from the armed
 * PDMA buffer; the completion callback fires once pdma_len reaches zero.
 *
 * NOTE(review): as with the write side, pdma_cur is not bounds-checked
 * against the backing buffer size -- confirm this cannot be overrun.
 */
static uint64_t sysbus_esp_pdma_read(void *opaque, hwaddr addr,
                                     unsigned int size)
{
    SysBusESPState *sysbus = opaque;
    ESPState *s = &sysbus->esp;
    uint8_t *buf = get_pdma_buf(s);
    uint64_t val = 0;

    if (s->pdma_len == 0) {
        return 0;
    }
    switch (size) {
    case 1:
        val = buf[s->pdma_cur++];
        s->pdma_len--;
        break;
    case 2:
        /* 16-bit access: high byte first (big-endian order) */
        val = buf[s->pdma_cur++];
        val = (val << 8) | buf[s->pdma_cur++];
        s->pdma_len -= 2;
        break;
    }

    if (s->pdma_len == 0 && s->pdma_cb) {
        esp_lower_drq(s);
        s->pdma_cb(s);
        s->pdma_cb = NULL;
    }
    return val;
}
 911
/* Pseudo-DMA port ops: 8- and 16-bit accesses only */
static const MemoryRegionOps sysbus_esp_pdma_ops = {
    .read = sysbus_esp_pdma_read,
    .write = sysbus_esp_pdma_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 2,
};
 919
/* SCSI bus callbacks wiring the generic SCSI layer to this adapter */
static const struct SCSIBusInfo esp_scsi_info = {
    .tcq = false,
    .max_target = ESP_MAX_DEVS,
    .max_lun = 7,

    .transfer_data = esp_transfer_data,
    .complete = esp_command_complete,
    .cancel = esp_request_cancelled
};
 929
/* Demultiplex the two input GPIOs: 0 = chip reset, 1 = DMA enable gate */
static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
    SysBusESPState *sysbus = ESP_STATE(opaque);
    ESPState *s = &sysbus->esp;

    switch (irq) {
    case 0:
        parent_esp_reset(s, irq, level);
        break;
    case 1:
        esp_dma_enable(opaque, irq, level);
        break;
    }
}
 944
/*
 * Realize the sysbus ESP device: expose the register bank and the
 * pseudo-DMA port as MMIO regions, wire up IRQ/DREQ outputs and the
 * reset/DMA-enable input GPIOs, and create the SCSI bus.
 */
static void sysbus_esp_realize(DeviceState *dev, Error **errp)
{
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    SysBusESPState *sysbus = ESP_STATE(dev);
    ESPState *s = &sysbus->esp;

    sysbus_init_irq(sbd, &s->irq);
    sysbus_init_irq(sbd, &s->irq_data);
    /* The board must have set the register spacing shift before realize */
    assert(sysbus->it_shift != -1);

    s->chip_id = TCHI_FAS100A;
    memory_region_init_io(&sysbus->iomem, OBJECT(sysbus), &sysbus_esp_mem_ops,
                          sysbus, "esp-regs", ESP_REGS << sysbus->it_shift);
    sysbus_init_mmio(sbd, &sysbus->iomem);
    memory_region_init_io(&sysbus->pdma, OBJECT(sysbus), &sysbus_esp_pdma_ops,
                          sysbus, "esp-pdma", 2);
    sysbus_init_mmio(sbd, &sysbus->pdma);

    qdev_init_gpio_in(dev, sysbus_esp_gpio_demux, 2);

    scsi_bus_new(&s->bus, sizeof(s->bus), dev, &esp_scsi_info, NULL);
}
 967
/* DeviceClass reset handler: delegate to the common chip reset */
static void sysbus_esp_hard_reset(DeviceState *dev)
{
    SysBusESPState *sysbus = ESP_STATE(dev);
    esp_hard_reset(&sysbus->esp);
}
 973
/* Migration wrapper: embeds the common ESP state for the sysbus device */
static const VMStateDescription vmstate_sysbus_esp_scsi = {
    .name = "sysbusespscsi",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
        VMSTATE_END_OF_LIST()
    }
};
 983
/* QOM class init: hook up realize, reset, migration and device category */
static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = sysbus_esp_realize;
    dc->reset = sysbus_esp_hard_reset;
    dc->vmsd = &vmstate_sysbus_esp_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
}
 993
/* QOM type registration record for the sysbus ESP device */
static const TypeInfo sysbus_esp_info = {
    .name          = TYPE_ESP,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(SysBusESPState),
    .class_init    = sysbus_esp_class_init,
};
1000
/* Register the sysbus ESP device type with QOM at module init time */
static void esp_register_types(void)
{
    type_register_static(&sysbus_esp_info);
}

type_init(esp_register_types)
1007