qemu/hw/dma.c
/*
 * QEMU DMA emulation
 *
 * Copyright (c) 2003-2004 Vassili Karpov (malc)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "hw.h"
#include "isa.h"

/* #define DEBUG_DMA */

#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
#ifdef DEBUG_DMA
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define linfo(...)
#define ldebug(...)
#endif

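/*
 * Per-channel state.  base[] holds the address and count values as
 * programmed through the I/O ports; now[] tracks the working address and
 * the number of bytes transferred so far, so the programmed values remain
 * available for read-back and re-initialization.  page/pageh are the PC
 * page registers supplying address bits 16-23 and 24-30.
 */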
struct dma_regs {
    int now[2];
    uint16_t base[2];
    uint8_t mode;
    uint8_t page;
    uint8_t pageh;
    uint8_t dack;
    uint8_t eop;
    DMA_transfer_handler transfer_handler;
    void *opaque;
};

/* Indices into the now[]/base[] pairs above.  */
#define ADDR 0
#define COUNT 1
static struct dma_cont {
    uint8_t status;
    uint8_t command;
    uint8_t mask;
    uint8_t flip_flop;
    int dshift;
    struct dma_regs regs[4];
    qemu_irq *cpu_request_exit;
} dma_controllers[2];

enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS    = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME  = 0x08,
    CMD_CYCLIC_PRIORITY  = 0x10,
    CMD_EXTENDED_WRITE   = 0x20,
    CMD_LOW_DREQ         = 0x40,
    CMD_LOW_DACK         = 0x80,
    CMD_NOT_SUPPORTED    = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
        | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
        | CMD_LOW_DREQ | CMD_LOW_DACK
};

static void DMA_run (void);

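/*
 * Page register port (low three bits of the port number) to channel
 * number.  On a PC the page registers live at 0x81 (channel 2), 0x82
 * (channel 3), 0x83 (channel 1) and 0x87 (channel 0), with analogous
 * offsets from 0x88 for the second controller; -1 marks ports that map
 * to no channel.
 */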
static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};

static void write_page (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].page = data;
}

static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].pageh = data;
}

static uint32_t read_page (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].page;
}

static uint32_t read_pageh (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].pageh;
}

static inline void init_chan (struct dma_cont *d, int ichan)
{
    struct dma_regs *r;

    r = d->regs + ichan;
    r->now[ADDR] = r->base[ADDR] << d->dshift;
    r->now[COUNT] = 0;
}

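/*
 * The controller's address and count registers are 16 bits wide but are
 * programmed through 8-bit ports, so an internal flip-flop selects the
 * low or high byte of each access; reading it also toggles it.
 */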
static inline int getff (struct dma_cont *d)
{
    int ff;

    ff = d->flip_flop;
    d->flip_flop = !ff;
    return ff;
}

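/*
 * Read back a channel's current address or remaining count, one byte per
 * access as selected by the flip-flop.  The current address is
 * reconstructed from the start address plus (or minus, in address
 * decrement mode) the number of bytes transferred so far.
 */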
static uint32_t read_chan (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan, nreg, iport, ff, val, dir;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;

    dir = ((r->mode >> 5) & 1) ? -1 : 1;
    ff = getff (d);
    if (nreg)
        val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
    else
        val = r->now[ADDR] + r->now[COUNT] * dir;

    ldebug ("read_chan %#x -> %d\n", iport, val);
    return (val >> (d->dshift + (ff << 3))) & 0xff;
}

static void write_chan (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan, nreg;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;
    if (getff (d)) {
        r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
        init_chan (d, ichan);
    } else {
        r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
    }
}

static void write_cont (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* command */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog ("command %#x not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x09:                  /* request */
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);
        }
        else {
            d->status &= ~(1 << (ichan + 4));
        }
        d->status &= ~(1 << ichan);
        DMA_run();
        break;

    case 0x0a:                  /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        DMA_run();
        break;

    case 0x0b:                  /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;
                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x0c:                  /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x0d:                  /* reset */
        d->flip_flop = 0;
        d->mask = ~0;
        d->status = 0;
        d->command = 0;
        break;

    case 0x0e:                  /* clear mask for all channels */
        d->mask = 0;
        DMA_run();
        break;

    case 0x0f:                  /* write mask for all channels */
        d->mask = data;
        DMA_run();
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
               nport, ichan, data);
    }
#endif
}

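/*
 * Controller register reads.  The status register keeps the
 * terminal-count flags in bits 0-3 and the request (DREQ) bits in bits
 * 4-7; as on the real i8237, the terminal-count bits are cleared by the
 * read itself.
 */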
static uint32_t read_cont (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int iport, val;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* status */
        val = d->status;
        d->status &= 0xf0;
        break;
    case 0x0f:                  /* mask */
        val = d->mask;
        break;
    default:
        val = 0;
        break;
    }

    ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
    return val;
}

int DMA_get_channel_mode (int nchan)
{
    return dma_controllers[nchan > 3].regs[nchan & 3].mode;
}

void DMA_hold_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("held cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status |= 1 << (ichan + 4);
    DMA_run();
}

void DMA_release_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("released cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
    DMA_run();
}

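/*
 * Run one transfer step on a channel.  The device's handler is called
 * with the channel number, the current position (bytes transferred so
 * far) and the total transfer size in bytes, i.e. (base count + 1),
 * doubled on the 16-bit controller, and returns the new position.
 */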
static void channel_run (int ncont, int ichan)
{
    int n;
    struct dma_regs *r = &dma_controllers[ncont].regs[ichan];
#ifdef DEBUG_DMA
    int dir, opmode;

    dir = (r->mode >> 5) & 1;
    opmode = (r->mode >> 6) & 3;

    if (dir) {
        dolog ("DMA in address decrement mode\n");
    }
    if (opmode != 1) {
        dolog ("DMA not in single mode select %#x\n", opmode);
    }
#endif

    n = r->transfer_handler (r->opaque, ichan + (ncont << 2),
                             r->now[COUNT], (r->base[COUNT] + 1) << ncont);
    r->now[COUNT] = n;
    ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
}

static QEMUBH *dma_bh;

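/*
 * Poll both controllers and service every channel that is unmasked and
 * has its DREQ bit set.  If anything ran, reschedule the bottom half so
 * the channel keeps being serviced until the device releases DREQ; the
 * "idle" variant runs it with low priority rather than immediately, so
 * this does not busy-spin.
 */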
static void DMA_run (void)
{
    struct dma_cont *d;
    int icont, ichan;
    int rearm = 0;

    d = dma_controllers;

    for (icont = 0; icont < 2; icont++, d++) {
        for (ichan = 0; ichan < 4; ichan++) {
            int mask;

            mask = 1 << ichan;

            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) {
                channel_run (icont, ichan);
                rearm = 1;
            }
        }
    }

    if (rearm)
        qemu_bh_schedule_idle(dma_bh);
}

static void DMA_run_bh(void *unused)
{
    DMA_run();
}

void DMA_register_channel (int nchan,
                           DMA_transfer_handler transfer_handler,
                           void *opaque)
{
    struct dma_regs *r;
    int ichan, ncont;

    ncont = nchan > 3;
    ichan = nchan & 3;

    r = dma_controllers[ncont].regs + ichan;
    r->transfer_handler = transfer_handler;
    r->opaque = opaque;
}

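/*
 * Device-side usage, as a rough sketch only; the names "my_dma_handler",
 * "MyDeviceState" and "my_device_consume" are hypothetical and not part
 * of this file.  A device on channel 2 registers a handler once, raises
 * DREQ when it wants service, and the handler is then called from
 * DMA_run() until it releases DREQ:
 *
 *     static int my_dma_handler(void *opaque, int nchan, int pos, int size)
 *     {
 *         MyDeviceState *s = opaque;
 *         uint8_t byte;
 *
 *         DMA_read_memory(nchan, &byte, pos, 1);   // guest RAM -> device
 *         my_device_consume(s, byte);              // hypothetical helper
 *         if (pos + 1 >= size) {
 *             DMA_release_DREQ(nchan);             // transfer finished
 *         }
 *         return pos + 1;                          // new position
 *     }
 *
 *     ...
 *     DMA_register_channel(2, my_dma_handler, s);  // at device init
 *     DMA_hold_DREQ(2);                            // when data is wanted
 */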
int DMA_read_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    target_phys_addr_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        /* Address decrement mode: read the block that ends at the current
           address, then reverse it in place so the caller sees the bytes
           in the order the device would receive them.  */
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_read (addr - pos - len, buf, len);
        /* What about 16bit transfers? */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[len - i - 1] = p[i];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_read (addr + pos, buf, len);

    return len;
}

int DMA_write_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    target_phys_addr_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        /* Address decrement mode: reverse the buffer in place before
           storing it, so guest memory receives the bytes in
           descending-address order.  The swap must stop at the midpoint
           (len >> 1), or the second half would clobber the first.  */
        int i;
        uint8_t *p = buf;

        /* What about 16bit transfers? */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[len - i - 1] = p[i];
            p[i] = b;
        }
        cpu_physical_memory_write (addr - pos - len, buf, len);
    }
    else
        cpu_physical_memory_write (addr + pos, buf, len);

    return len;
}

/* Request the emulator to transfer a new DMA memory block ASAP: pulsing
   cpu_request_exit kicks the CPU out of its execution loop so pending
   bottom halves (such as dma_bh) get serviced promptly.  */
void DMA_schedule(int nchan)
{
    struct dma_cont *d = &dma_controllers[nchan > 3];

    qemu_irq_pulse(*d->cpu_request_exit);
}

static void dma_reset(void *opaque)
{
    struct dma_cont *d = opaque;
    write_cont (d, (0x0d << d->dshift), 0);
}

static int dma_phony_handler (void *opaque, int nchan, int dma_pos, int dma_len)
{
    dolog ("unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d\n",
           nchan, dma_pos, dma_len);
    return dma_pos;
}

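/*
 * I/O port layout registered below: base+0 .. base+7 (scaled by dshift)
 * are the per-channel address/count ports, base+8 .. base+15 (again
 * scaled) the controller ports handled by write_cont/read_cont, and the
 * page registers sit at page_base plus the offsets in page_port_list,
 * with pageh_base used for the optional high page registers.
 */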
/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
static void dma_init2(struct dma_cont *d, int base, int dshift,
                      int page_base, int pageh_base,
                      qemu_irq *cpu_request_exit)
{
    static const int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
    int i;

    d->dshift = dshift;
    d->cpu_request_exit = cpu_request_exit;
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + (i << dshift), 1, 1, write_chan, d);
        register_ioport_read (base + (i << dshift), 1, 1, read_chan, d);
    }
    for (i = 0; i < ARRAY_SIZE (page_port_list); i++) {
        register_ioport_write (page_base + page_port_list[i], 1, 1,
                               write_page, d);
        register_ioport_read (page_base + page_port_list[i], 1, 1,
                              read_page, d);
        if (pageh_base >= 0) {
            register_ioport_write (pageh_base + page_port_list[i], 1, 1,
                                   write_pageh, d);
            register_ioport_read (pageh_base + page_port_list[i], 1, 1,
                                  read_pageh, d);
        }
    }
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + ((i + 8) << dshift), 1, 1,
                               write_cont, d);
        register_ioport_read (base + ((i + 8) << dshift), 1, 1,
                              read_cont, d);
    }
    qemu_register_reset(dma_reset, d);
    dma_reset(d);
    for (i = 0; i < ARRAY_SIZE (d->regs); ++i) {
        d->regs[i].transfer_handler = dma_phony_handler;
    }
}

static const VMStateDescription vmstate_dma_regs = {
    .name = "dma_regs",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields      = (VMStateField []) {
        VMSTATE_INT32_ARRAY(now, struct dma_regs, 2),
        VMSTATE_UINT16_ARRAY(base, struct dma_regs, 2),
        VMSTATE_UINT8(mode, struct dma_regs),
        VMSTATE_UINT8(page, struct dma_regs),
        VMSTATE_UINT8(pageh, struct dma_regs),
        VMSTATE_UINT8(dack, struct dma_regs),
        VMSTATE_UINT8(eop, struct dma_regs),
        VMSTATE_END_OF_LIST()
    }
};

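/* After migration, re-examine the channels so any transfer that was
   pending when the source saved its state is restarted.  */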
static int dma_post_load(void *opaque, int version_id)
{
    DMA_run();

    return 0;
}

static const VMStateDescription vmstate_dma = {
    .name = "dma",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = dma_post_load,
    .fields      = (VMStateField []) {
        VMSTATE_UINT8(command, struct dma_cont),
        VMSTATE_UINT8(mask, struct dma_cont),
        VMSTATE_UINT8(flip_flop, struct dma_cont),
        VMSTATE_INT32(dshift, struct dma_cont),
        VMSTATE_STRUCT_ARRAY(regs, struct dma_cont, 4, 1, vmstate_dma_regs, struct dma_regs),
        VMSTATE_END_OF_LIST()
    }
};

void DMA_init(int high_page_enable, qemu_irq *cpu_request_exit)
{
    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
              high_page_enable ? 0x480 : -1, cpu_request_exit);
    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
              high_page_enable ? 0x488 : -1, cpu_request_exit);
    vmstate_register (NULL, 0, &vmstate_dma, &dma_controllers[0]);
    vmstate_register (NULL, 1, &vmstate_dma, &dma_controllers[1]);

    dma_bh = qemu_bh_new(DMA_run_bh, NULL);
}