qemu/hw/ide/pci.c
/*
 * QEMU IDE Emulation: PCI Bus support.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2006 Openedhand Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "sysemu/dma.h"
#include "qemu/error-report.h"
#include "hw/ide/pci.h"
#include "trace.h"

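/*
 * The PRD (Physical Region Descriptor) table is confined to a single
 * 4 KiB page: once the engine has walked one page's worth of
 * descriptors without seeing the end-of-table flag, it gives up.
 */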
#define BMDMA_PAGE_SIZE 4096

#define BM_MIGRATION_COMPAT_STATUS_BITS \
        (IDE_RETRY_DMA | IDE_RETRY_PIO | \
        IDE_RETRY_READ | IDE_RETRY_FLUSH)

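/*
 * The "cmd" BAR covers the control block.  Only a 1-byte access to
 * offset 2 (device control on write, alternate status on read) is
 * meaningful; anything else reads as all-ones, as on a floating bus,
 * and writes are discarded.
 */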
static uint64_t pci_ide_cmd_read(void *opaque, hwaddr addr, unsigned size)
{
    IDEBus *bus = opaque;

    if (addr != 2 || size != 1) {
        return ((uint64_t)1 << (size * 8)) - 1;
    }
    return ide_status_read(bus, addr + 2);
}

static void pci_ide_cmd_write(void *opaque, hwaddr addr,
                              uint64_t data, unsigned size)
{
    IDEBus *bus = opaque;

    if (addr != 2 || size != 1) {
        return;
    }
    ide_cmd_write(bus, addr + 2, data);
}

const MemoryRegionOps pci_ide_cmd_le_ops = {
    .read = pci_ide_cmd_read,
    .write = pci_ide_cmd_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

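/*
 * The "data" BAR covers the command block.  Byte accesses reach the
 * taskfile registers at their natural offsets; 16- and 32-bit accesses
 * are only valid at offset 0, the data port.
 */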
static uint64_t pci_ide_data_read(void *opaque, hwaddr addr, unsigned size)
{
    IDEBus *bus = opaque;

    if (size == 1) {
        return ide_ioport_read(bus, addr);
    } else if (addr == 0) {
        if (size == 2) {
            return ide_data_readw(bus, addr);
        } else {
            return ide_data_readl(bus, addr);
        }
    }
    return ((uint64_t)1 << (size * 8)) - 1;
}

static void pci_ide_data_write(void *opaque, hwaddr addr,
                               uint64_t data, unsigned size)
{
    IDEBus *bus = opaque;

    if (size == 1) {
        ide_ioport_write(bus, addr, data);
    } else if (addr == 0) {
        if (size == 2) {
            ide_data_writew(bus, addr, data);
        } else {
            ide_data_writel(bus, addr, data);
        }
    }
}

const MemoryRegionOps pci_ide_data_le_ops = {
    .read = pci_ide_data_read,
    .write = pci_ide_data_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

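/*
 * Latch the completion callback and reset the cached PRD state.  If the
 * engine is already running (BM_STATUS_DMAING), kick the transfer off
 * immediately; otherwise bmdma_cmd_writeb() starts it once the guest
 * sets BM_CMD_START.
 */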
static void bmdma_start_dma(IDEDMA *dma, IDEState *s,
                            BlockCompletionFunc *dma_cb)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = dma_cb;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;

    if (bm->status & BM_STATUS_DMAING) {
        bm->dma_cb(bmdma_active_if(bm), 0);
    }
}

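/*
 * Each PRD entry is 8 bytes, little-endian:
 *   bits  0-31: physical base address of the memory region
 *   bits 32-47: byte count (0 means 64 KiB)
 *   bit     63: end-of-table flag
 * Bit 0 of the byte count is masked off, keeping transfers 16-bit
 * aligned; the parsing below follows this layout.
 */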
/**
 * Prepare an sglist based on available PRDs.
 * @limit: How many bytes to prepare total.
 *
 * Returns the number of bytes prepared, -1 on error.
 * IDEState.io_buffer_size will contain the number of bytes described
 * by the PRDs, whether or not we added them to the sglist.
 */
static int32_t bmdma_prepare_buf(IDEDMA *dma, int32_t limit)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    pci_dma_sglist_init(&s->sg, pci_dev,
                        s->nsector / (BMDMA_PAGE_SIZE / 512) + 1);
    s->io_buffer_size = 0;
    for (;;) {
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return s->sg.size;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0) {
                len = 0x10000;
            }
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        l = bm->cur_prd_len;
        if (l > 0) {
            uint64_t sg_len;

            /* Don't add extra bytes to the SGList; consume any remaining
             * PRDs from the guest, but ignore them. */
            sg_len = MIN(limit - s->sg.size, bm->cur_prd_len);
            if (sg_len) {
                qemu_sglist_add(&s->sg, bm->cur_prd_addr, sg_len);
            }

            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_size += l;
        }
    }

    qemu_sglist_destroy(&s->sg);
    s->io_buffer_size = 0;
    return -1;
}

/*
 * Copy between the PRD-described guest memory and s->io_buffer.
 * Returns 1 once the whole buffer has been transferred, 0 if the PRD
 * table ran out first.
 */
static int bmdma_rw_buf(IDEDMA *dma, int is_write)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);
    IDEState *s = bmdma_active_if(bm);
    PCIDevice *pci_dev = PCI_DEVICE(bm->pci_dev);
    struct {
        uint32_t addr;
        uint32_t size;
    } prd;
    int l, len;

    for (;;) {
        l = s->io_buffer_size - s->io_buffer_index;
        if (l <= 0) {
            break;
        }
        if (bm->cur_prd_len == 0) {
            /* end of table (with a fail safe of one page) */
            if (bm->cur_prd_last ||
                (bm->cur_addr - bm->addr) >= BMDMA_PAGE_SIZE) {
                return 0;
            }
            pci_dma_read(pci_dev, bm->cur_addr, &prd, 8);
            bm->cur_addr += 8;
            prd.addr = le32_to_cpu(prd.addr);
            prd.size = le32_to_cpu(prd.size);
            len = prd.size & 0xfffe;
            if (len == 0) {
                len = 0x10000;
            }
            bm->cur_prd_len = len;
            bm->cur_prd_addr = prd.addr;
            bm->cur_prd_last = (prd.size & 0x80000000);
        }
        if (l > bm->cur_prd_len) {
            l = bm->cur_prd_len;
        }
        if (l > 0) {
            if (is_write) {
                pci_dma_write(pci_dev, bm->cur_prd_addr,
                              s->io_buffer + s->io_buffer_index, l);
            } else {
                pci_dma_read(pci_dev, bm->cur_prd_addr,
                             s->io_buffer + s->io_buffer_index, l);
            }
            bm->cur_prd_addr += l;
            bm->cur_prd_len -= l;
            s->io_buffer_index += l;
        }
    }
    return 1;
}

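/*
 * Drop the completion callback.  'more' is set when the device expects
 * further PRDs for the same command, in which case the engine stays in
 * the DMAING state instead of going idle.
 */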
static void bmdma_set_inactive(IDEDMA *dma, bool more)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->dma_cb = NULL;
    if (more) {
        bm->status |= BM_STATUS_DMAING;
    } else {
        bm->status &= ~BM_STATUS_DMAING;
    }
}

static void bmdma_restart_dma(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    bm->cur_addr = bm->addr;
}

static void bmdma_cancel(BMDMAState *bm)
{
    if (bm->status & BM_STATUS_DMAING) {
        /* cancel DMA request */
        bmdma_set_inactive(&bm->dma, false);
    }
}

static void bmdma_reset(IDEDMA *dma)
{
    BMDMAState *bm = DO_UPCAST(BMDMAState, dma, dma);

    trace_bmdma_reset();
    bmdma_cancel(bm);
    bm->cmd = 0;
    bm->status = 0;
    bm->addr = 0;
    bm->cur_addr = 0;
    bm->cur_prd_last = 0;
    bm->cur_prd_addr = 0;
    bm->cur_prd_len = 0;
}

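/*
 * IRQs from the IDE bus are routed through here so that the BM status
 * register can latch the interrupt bit before the line is forwarded to
 * the PCI interrupt pin saved in bm->irq.
 */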
static void bmdma_irq(void *opaque, int n, int level)
{
    BMDMAState *bm = opaque;

    if (!level) {
        /* pass through lower */
        qemu_set_irq(bm->irq, level);
        return;
    }

    bm->status |= BM_STATUS_INT;

    /* trigger the real irq */
    qemu_set_irq(bm->irq, level);
}

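/*
 * Bus master command register.  Bit 0 (BM_CMD_START, the SSBM bit)
 * starts or stops the engine; bit 3 selects the transfer direction.
 * Only those two bits are writable, hence the 0x09 mask at the end.
 */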
void bmdma_cmd_writeb(BMDMAState *bm, uint32_t val)
{
    trace_bmdma_cmd_writeb(val);

    /* Ignore writes to SSBM if it keeps the old value */
    if ((val & BM_CMD_START) != (bm->cmd & BM_CMD_START)) {
        if (!(val & BM_CMD_START)) {
            ide_cancel_dma_sync(idebus_active_if(bm->bus));
            bm->status &= ~BM_STATUS_DMAING;
        } else {
            bm->cur_addr = bm->addr;
            if (!(bm->status & BM_STATUS_DMAING)) {
                bm->status |= BM_STATUS_DMAING;
                /* start dma transfer if possible */
                if (bm->dma_cb) {
                    bm->dma_cb(bmdma_active_if(bm), 0);
                }
            }
        }
    }

    bm->cmd = val & 0x09;
}

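/*
 * The 32-bit PRD table base register is byte-addressable: 'addr' is the
 * offset of the first byte touched and 'width' the access size, so the
 * guest may read and write it in 1-, 2- or 4-byte pieces.  The two low
 * bits are forced to zero, since the PRD table is dword-aligned.
 */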
static uint64_t bmdma_addr_read(void *opaque, hwaddr addr,
                                unsigned width)
{
    BMDMAState *bm = opaque;
    uint32_t mask = (1ULL << (width * 8)) - 1;
    uint64_t data;

    data = (bm->addr >> (addr * 8)) & mask;
    trace_bmdma_addr_read(data);
    return data;
}

static void bmdma_addr_write(void *opaque, hwaddr addr,
                             uint64_t data, unsigned width)
{
    BMDMAState *bm = opaque;
    int shift = addr * 8;
    uint32_t mask = (1ULL << (width * 8)) - 1;

    trace_bmdma_addr_write(data);
    bm->addr &= ~(mask << shift);
    bm->addr |= ((data & mask) << shift) & ~3;
}

MemoryRegionOps bmdma_addr_ioport_ops = {
    .read = bmdma_addr_read,
    .write = bmdma_addr_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
};

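/*
 * Migration: the "current" subsection only travels while a PRD is
 * partially consumed, i.e. when there is in-flight transfer state
 * worth preserving.
 */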
static bool ide_bmdma_current_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    return (bm->cur_prd_len != 0);
}

static bool ide_bmdma_status_needed(void *opaque)
{
    BMDMAState *bm = opaque;

    /* Older versions abused some bits in the status register for internal
     * error state. If any of these bits are set, we must add a subsection to
     * transfer the real status register */
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    return ((bm->status & abused_bits) != 0);
}

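/*
 * If a DMA command is still pending (callback set but engine stopped),
 * mark it for retry, then fold the retry bits into the compat status
 * byte that older QEMU versions expect on the wire.
 */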
static int ide_bmdma_pre_save(void *opaque)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (!(bm->status & BM_STATUS_DMAING) && bm->dma_cb) {
        bm->bus->error_status =
            ide_dma_cmd_to_retry(bmdma_active_if(bm)->dma_cmd);
    }
    bm->migration_retry_unit = bm->bus->retry_unit;
    bm->migration_retry_sector_num = bm->bus->retry_sector_num;
    bm->migration_retry_nsector = bm->bus->retry_nsector;
    bm->migration_compat_status =
        (bm->status & ~abused_bits) | (bm->bus->error_status & abused_bits);

    return 0;
}

/* This function accesses bm->bus->error_status which is loaded only after
 * BMDMA itself. This is why the function is called from ide_pci_post_load
 * instead of being registered with VMState where it would run too early. */
static int ide_bmdma_post_load(void *opaque, int version_id)
{
    BMDMAState *bm = opaque;
    uint8_t abused_bits = BM_MIGRATION_COMPAT_STATUS_BITS;

    if (bm->status == 0) {
        bm->status = bm->migration_compat_status & ~abused_bits;
        bm->bus->error_status |= bm->migration_compat_status & abused_bits;
    }
    if (bm->bus->error_status) {
        bm->bus->retry_sector_num = bm->migration_retry_sector_num;
        bm->bus->retry_nsector = bm->migration_retry_nsector;
        bm->bus->retry_unit = bm->migration_retry_unit;
    }

    return 0;
}

static const VMStateDescription vmstate_bmdma_current = {
    .name = "ide bmdma_current",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_current_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(cur_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_last, BMDMAState),
        VMSTATE_UINT32(cur_prd_addr, BMDMAState),
        VMSTATE_UINT32(cur_prd_len, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma_status = {
    .name = "ide bmdma/status",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = ide_bmdma_status_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(status, BMDMAState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_bmdma = {
    .name = "ide bmdma",
    .version_id = 3,
    .minimum_version_id = 0,
    .pre_save = ide_bmdma_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT8(cmd, BMDMAState),
        VMSTATE_UINT8(migration_compat_status, BMDMAState),
        VMSTATE_UINT32(addr, BMDMAState),
        VMSTATE_INT64(migration_retry_sector_num, BMDMAState),
        VMSTATE_UINT32(migration_retry_nsector, BMDMAState),
        VMSTATE_UINT8(migration_retry_unit, BMDMAState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_bmdma_current,
        &vmstate_bmdma_status,
        NULL
    }
};

static int ide_pci_post_load(void *opaque, int version_id)
{
    PCIIDEState *d = opaque;
    int i;

    for (i = 0; i < 2; i++) {
        /* current versions always store 0/1, but older versions
           stored bigger values. We only need the last bit */
        d->bmdma[i].migration_retry_unit &= 1;
        ide_bmdma_post_load(&d->bmdma[i], -1);
    }

    return 0;
}

const VMStateDescription vmstate_ide_pci = {
    .name = "ide",
    .version_id = 3,
    .minimum_version_id = 0,
    .post_load = ide_pci_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_PCI_DEVICE(parent_obj, PCIIDEState),
        VMSTATE_STRUCT_ARRAY(bmdma, PCIIDEState, 2, 0,
                             vmstate_bmdma, BMDMAState),
        VMSTATE_IDE_BUS_ARRAY(bus, PCIIDEState, 2),
        VMSTATE_IDE_DRIVES(bus[0].ifs, PCIIDEState),
        VMSTATE_IDE_DRIVES(bus[1].ifs, PCIIDEState),
        VMSTATE_END_OF_LIST()
    }
};

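/*
 * Attach up to four drives in primary/secondary master/slave order.
 * A typical board model fills hd_table from the drive list and calls
 * this once; a sketch of the usual caller (see the PIIX/VIA models for
 * the real wiring):
 *
 *     DriveInfo *hd[4];
 *     ide_drive_get(hd, ARRAY_SIZE(hd));
 *     pci_ide_create_devs(pci_dev, hd);
 */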
void pci_ide_create_devs(PCIDevice *dev, DriveInfo **hd_table)
{
    PCIIDEState *d = PCI_IDE(dev);
    static const int bus[4]  = { 0, 0, 1, 1 };
    static const int unit[4] = { 0, 1, 0, 1 };
    int i;

    for (i = 0; i < 4; i++) {
        if (hd_table[i] == NULL) {
            continue;
        }
        ide_create_drive(d->bus + bus[i], unit[i], hd_table[i]);
    }
}

static const struct IDEDMAOps bmdma_ops = {
    .start_dma = bmdma_start_dma,
    .prepare_buf = bmdma_prepare_buf,
    .rw_buf = bmdma_rw_buf,
    .restart_dma = bmdma_restart_dma,
    .set_inactive = bmdma_set_inactive,
    .reset = bmdma_reset,
};

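/*
 * Wire a BMDMA engine onto an IDE bus.  The bus's IRQ line is replaced
 * with an interposer (bmdma_irq) so interrupts update the BM status
 * register on their way out; the original line is kept in bm->irq.
 */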
void bmdma_init(IDEBus *bus, BMDMAState *bm, PCIIDEState *d)
{
    if (bus->dma == &bm->dma) {
        return;
    }

    bm->dma.ops = &bmdma_ops;
    bus->dma = &bm->dma;
    bm->irq = bus->irq;
    bus->irq = qemu_allocate_irq(bmdma_irq, bm, 0);
    bm->pci_dev = d;
}

static const TypeInfo pci_ide_type_info = {
    .name = TYPE_PCI_IDE,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(PCIIDEState),
    .abstract = true,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};

static void pci_ide_register_types(void)
{
    type_register_static(&pci_ide_type_info);
}

type_init(pci_ide_register_types)