/* qemu/hw/dma.c */
   1/*
   2 * QEMU DMA emulation
   3 *
   4 * Copyright (c) 2003-2004 Vassili Karpov (malc)
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a copy
   7 * of this software and associated documentation files (the "Software"), to deal
   8 * in the Software without restriction, including without limitation the rights
   9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10 * copies of the Software, and to permit persons to whom the Software is
  11 * furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22 * THE SOFTWARE.
  23 */
  24#include "hw.h"
  25#include "isa.h"
  26
  27/* #define DEBUG_DMA */
  28
/* Logging helpers.  dolog() is always active (operational warnings);
   lwarn/linfo/ldebug expand to nothing unless DEBUG_DMA is defined above. */
#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
#ifdef DEBUG_DMA
#define lwarn(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define lwarn(...)
#define linfo(...)
#define ldebug(...)
#endif
  39
/* Per-channel state of one i8237 DMA controller. */
struct dma_regs {
    int now[2];                 /* [ADDR]: current address (base << dshift),
                                   [COUNT]: bytes transferred so far */
    uint16_t base[2];           /* programmed base address / base count */
    uint8_t mode;               /* mode register (bit 5: address decrement,
                                   bits 6-7: transfer mode) */
    uint8_t page;               /* page register: address bits 16-23 */
    uint8_t pageh;              /* high page register: address bits 24-30 */
    uint8_t dack;               /* DACK line state (only saved/restored here) */
    uint8_t eop;                /* EOP line state (only saved/restored here) */
    DMA_transfer_handler transfer_handler;  /* device callback that moves data */
    void *opaque;               /* argument passed back to transfer_handler */
};
  51
/* Indices into dma_regs.now[] / dma_regs.base[]. */
#define ADDR 0
#define COUNT 1

/* One i8237 controller; the PC has two (8-bit channels 0-3, 16-bit 4-7). */
static struct dma_cont {
    uint8_t status;    /* bits 0-3: terminal count, bits 4-7: DREQ pending */
    uint8_t command;   /* command register (only the all-zero command works) */
    uint8_t mask;      /* per-channel mask; set bit = channel disabled */
    uint8_t flip_flop; /* low/high byte selector for 16-bit register access */
    int dshift;        /* 0: 8-bit controller, 1: 16-bit controller */
    struct dma_regs regs[4];
} dma_controllers[2];
  63
/* i8237 command-register feature bits.  write_cont rejects any command
   that sets one of the CMD_NOT_SUPPORTED bits. */
enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS    = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME  = 0x08,
    CMD_CYCLIC_PRIORITY  = 0x10,
    CMD_EXTENDED_WRITE   = 0x20,
    CMD_LOW_DREQ         = 0x40,
    CMD_LOW_DACK         = 0x80,
    CMD_NOT_SUPPORTED    = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ | CMD_LOW_DACK

};
  78
static void DMA_run (void);

/* Page-register port (low 3 bits) -> channel number; -1 = port unused. */
static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};
  82
  83static void write_page (void *opaque, uint32_t nport, uint32_t data)
  84{
  85    struct dma_cont *d = opaque;
  86    int ichan;
  87
  88    ichan = channels[nport & 7];
  89    if (-1 == ichan) {
  90        dolog ("invalid channel %#x %#x\n", nport, data);
  91        return;
  92    }
  93    d->regs[ichan].page = data;
  94}
  95
  96static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
  97{
  98    struct dma_cont *d = opaque;
  99    int ichan;
 100
 101    ichan = channels[nport & 7];
 102    if (-1 == ichan) {
 103        dolog ("invalid channel %#x %#x\n", nport, data);
 104        return;
 105    }
 106    d->regs[ichan].pageh = data;
 107}
 108
 109static uint32_t read_page (void *opaque, uint32_t nport)
 110{
 111    struct dma_cont *d = opaque;
 112    int ichan;
 113
 114    ichan = channels[nport & 7];
 115    if (-1 == ichan) {
 116        dolog ("invalid channel read %#x\n", nport);
 117        return 0;
 118    }
 119    return d->regs[ichan].page;
 120}
 121
 122static uint32_t read_pageh (void *opaque, uint32_t nport)
 123{
 124    struct dma_cont *d = opaque;
 125    int ichan;
 126
 127    ichan = channels[nport & 7];
 128    if (-1 == ichan) {
 129        dolog ("invalid channel read %#x\n", nport);
 130        return 0;
 131    }
 132    return d->regs[ichan].pageh;
 133}
 134
 135static inline void init_chan (struct dma_cont *d, int ichan)
 136{
 137    struct dma_regs *r;
 138
 139    r = d->regs + ichan;
 140    r->now[ADDR] = r->base[ADDR] << d->dshift;
 141    r->now[COUNT] = 0;
 142}
 143
 144static inline int getff (struct dma_cont *d)
 145{
 146    int ff;
 147
 148    ff = d->flip_flop;
 149    d->flip_flop = !ff;
 150    return ff;
 151}
 152
 153static uint32_t read_chan (void *opaque, uint32_t nport)
 154{
 155    struct dma_cont *d = opaque;
 156    int ichan, nreg, iport, ff, val, dir;
 157    struct dma_regs *r;
 158
 159    iport = (nport >> d->dshift) & 0x0f;
 160    ichan = iport >> 1;
 161    nreg = iport & 1;
 162    r = d->regs + ichan;
 163
 164    dir = ((r->mode >> 5) & 1) ? -1 : 1;
 165    ff = getff (d);
 166    if (nreg)
 167        val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
 168    else
 169        val = r->now[ADDR] + r->now[COUNT] * dir;
 170
 171    ldebug ("read_chan %#x -> %d\n", iport, val);
 172    return (val >> (d->dshift + (ff << 3))) & 0xff;
 173}
 174
 175static void write_chan (void *opaque, uint32_t nport, uint32_t data)
 176{
 177    struct dma_cont *d = opaque;
 178    int iport, ichan, nreg;
 179    struct dma_regs *r;
 180
 181    iport = (nport >> d->dshift) & 0x0f;
 182    ichan = iport >> 1;
 183    nreg = iport & 1;
 184    r = d->regs + ichan;
 185    if (getff (d)) {
 186        r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
 187        init_chan (d, ichan);
 188    } else {
 189        r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
 190    }
 191}
 192
/* I/O write handler for the controller register block (decoded offsets
   0x08-0x0f; scaled by dshift on the 16-bit controller).  Several cases
   call DMA_run() because unmasking or requesting a channel may make a
   pending transfer runnable immediately. */
static void write_cont (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* command */
        /* Only the trivial command (no feature bits) is emulated. */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog ("command %#x not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x09:                  /* request: bit 2 raises/drops software DREQ */
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);
        }
        else {
            d->status &= ~(1 << (ichan + 4));
        }
        /* Writing the request register also clears the channel's TC bit. */
        d->status &= ~(1 << ichan);
        DMA_run();
        break;

    case 0x0a:                  /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        DMA_run();
        break;

    case 0x0b:                  /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;
                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x0c:                  /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x0d:                  /* reset (master clear): mask all channels */
        d->flip_flop = 0;
        d->mask = ~0;
        d->status = 0;
        d->command = 0;
        break;

    case 0x0e:                  /* clear mask for all channels */
        d->mask = 0;
        DMA_run();
        break;

    case 0x0f:                  /* write mask for all channels */
        d->mask = data;
        DMA_run();
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
               nport, ichan, data);
    }
#endif
}
 280
 281static uint32_t read_cont (void *opaque, uint32_t nport)
 282{
 283    struct dma_cont *d = opaque;
 284    int iport, val;
 285
 286    iport = (nport >> d->dshift) & 0x0f;
 287    switch (iport) {
 288    case 0x08:                  /* status */
 289        val = d->status;
 290        d->status &= 0xf0;
 291        break;
 292    case 0x0f:                  /* mask */
 293        val = d->mask;
 294        break;
 295    default:
 296        val = 0;
 297        break;
 298    }
 299
 300    ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
 301    return val;
 302}
 303
 304int DMA_get_channel_mode (int nchan)
 305{
 306    return dma_controllers[nchan > 3].regs[nchan & 3].mode;
 307}
 308
 309void DMA_hold_DREQ (int nchan)
 310{
 311    int ncont, ichan;
 312
 313    ncont = nchan > 3;
 314    ichan = nchan & 3;
 315    linfo ("held cont=%d chan=%d\n", ncont, ichan);
 316    dma_controllers[ncont].status |= 1 << (ichan + 4);
 317    DMA_run();
 318}
 319
 320void DMA_release_DREQ (int nchan)
 321{
 322    int ncont, ichan;
 323
 324    ncont = nchan > 3;
 325    ichan = nchan & 3;
 326    linfo ("released cont=%d chan=%d\n", ncont, ichan);
 327    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
 328    DMA_run();
 329}
 330
 331static void channel_run (int ncont, int ichan)
 332{
 333    int n;
 334    struct dma_regs *r = &dma_controllers[ncont].regs[ichan];
 335#ifdef DEBUG_DMA
 336    int dir, opmode;
 337
 338    dir = (r->mode >> 5) & 1;
 339    opmode = (r->mode >> 6) & 3;
 340
 341    if (dir) {
 342        dolog ("DMA in address decrement mode\n");
 343    }
 344    if (opmode != 1) {
 345        dolog ("DMA not in single mode select %#x\n", opmode);
 346    }
 347#endif
 348
 349    r = dma_controllers[ncont].regs + ichan;
 350    n = r->transfer_handler (r->opaque, ichan + (ncont << 2),
 351                             r->now[COUNT], (r->base[COUNT] + 1) << ncont);
 352    r->now[COUNT] = n;
 353    ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
 354}
 355
 356static QEMUBH *dma_bh;
 357
 358static void DMA_run (void)
 359{
 360    struct dma_cont *d;
 361    int icont, ichan;
 362    int rearm = 0;
 363
 364    d = dma_controllers;
 365
 366    for (icont = 0; icont < 2; icont++, d++) {
 367        for (ichan = 0; ichan < 4; ichan++) {
 368            int mask;
 369
 370            mask = 1 << ichan;
 371
 372            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) {
 373                channel_run (icont, ichan);
 374                rearm = 1;
 375            }
 376        }
 377    }
 378
 379    if (rearm)
 380        qemu_bh_schedule_idle(dma_bh);
 381}
 382
 383static void DMA_run_bh(void *unused)
 384{
 385    DMA_run();
 386}
 387
 388void DMA_register_channel (int nchan,
 389                           DMA_transfer_handler transfer_handler,
 390                           void *opaque)
 391{
 392    struct dma_regs *r;
 393    int ichan, ncont;
 394
 395    ncont = nchan > 3;
 396    ichan = nchan & 3;
 397
 398    r = dma_controllers[ncont].regs + ichan;
 399    r->transfer_handler = transfer_handler;
 400    r->opaque = opaque;
 401}
 402
 403int DMA_read_memory (int nchan, void *buf, int pos, int len)
 404{
 405    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
 406    target_phys_addr_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];
 407
 408    if (r->mode & 0x20) {
 409        int i;
 410        uint8_t *p = buf;
 411
 412        cpu_physical_memory_read (addr - pos - len, buf, len);
 413        /* What about 16bit transfers? */
 414        for (i = 0; i < len >> 1; i++) {
 415            uint8_t b = p[len - i - 1];
 416            p[i] = b;
 417        }
 418    }
 419    else
 420        cpu_physical_memory_read (addr + pos, buf, len);
 421
 422    return len;
 423}
 424
 425int DMA_write_memory (int nchan, void *buf, int pos, int len)
 426{
 427    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
 428    target_phys_addr_t addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];
 429
 430    if (r->mode & 0x20) {
 431        int i;
 432        uint8_t *p = buf;
 433
 434        cpu_physical_memory_write (addr - pos - len, buf, len);
 435        /* What about 16bit transfers? */
 436        for (i = 0; i < len; i++) {
 437            uint8_t b = p[len - i - 1];
 438            p[i] = b;
 439        }
 440    }
 441    else
 442        cpu_physical_memory_write (addr + pos, buf, len);
 443
 444    return len;
 445}
 446
 447/* request the emulator to transfer a new DMA memory block ASAP */
 448void DMA_schedule(int nchan)
 449{
 450    CPUState *env = cpu_single_env;
 451    if (env)
 452        cpu_interrupt(env, CPU_INTERRUPT_EXIT);
 453}
 454
 455static void dma_reset(void *opaque)
 456{
 457    struct dma_cont *d = opaque;
 458    write_cont (d, (0x0d << d->dshift), 0);
 459}
 460
 461static int dma_phony_handler (void *opaque, int nchan, int dma_pos, int dma_len)
 462{
 463    dolog ("unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d\n",
 464           nchan, dma_pos, dma_len);
 465    return dma_pos;
 466}
 467
/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
/* Wire up one controller: channel registers at base (ports spaced by
   1 << dshift), page registers at page_base, optional high page registers
   at pageh_base (pass a negative value to disable them), and the control
   register block at base + (8 << dshift).  Also registers the reset hook,
   applies an initial reset, and installs the phony handler on every
   channel so stray accesses are logged rather than crashing. */
static void dma_init2(struct dma_cont *d, int base, int dshift,
                      int page_base, int pageh_base)
{
    /* Page-register port offsets for channels 0..3 (see channels[]). */
    static const int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
    int i;

    d->dshift = dshift;
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + (i << dshift), 1, 1, write_chan, d);
        register_ioport_read (base + (i << dshift), 1, 1, read_chan, d);
    }
    for (i = 0; i < ARRAY_SIZE (page_port_list); i++) {
        register_ioport_write (page_base + page_port_list[i], 1, 1,
                               write_page, d);
        register_ioport_read (page_base + page_port_list[i], 1, 1,
                              read_page, d);
        if (pageh_base >= 0) {
            register_ioport_write (pageh_base + page_port_list[i], 1, 1,
                                   write_pageh, d);
            register_ioport_read (pageh_base + page_port_list[i], 1, 1,
                                  read_pageh, d);
        }
    }
    /* Control registers occupy the 8 ports above the channel registers. */
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + ((i + 8) << dshift), 1, 1,
                               write_cont, d);
        register_ioport_read (base + ((i + 8) << dshift), 1, 1,
                              read_cont, d);
    }
    qemu_register_reset(dma_reset, d);
    dma_reset(d);
    for (i = 0; i < ARRAY_SIZE (d->regs); ++i) {
        d->regs[i].transfer_handler = dma_phony_handler;
    }
}
 504
/* Savevm handler: serialize one controller.  Field order here is the wire
   format and must match dma_load exactly; status is deliberately skipped
   (see the commented-out line). */
static void dma_save (QEMUFile *f, void *opaque)
{
    struct dma_cont *d = opaque;
    int i;

    /* qemu_put_8s (f, &d->status); */
    qemu_put_8s (f, &d->command);
    qemu_put_8s (f, &d->mask);
    qemu_put_8s (f, &d->flip_flop);
    qemu_put_be32 (f, d->dshift);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        qemu_put_be32 (f, r->now[0]);
        qemu_put_be32 (f, r->now[1]);
        qemu_put_be16s (f, &r->base[0]);
        qemu_put_be16s (f, &r->base[1]);
        qemu_put_8s (f, &r->mode);
        qemu_put_8s (f, &r->page);
        qemu_put_8s (f, &r->pageh);
        qemu_put_8s (f, &r->dack);
        qemu_put_8s (f, &r->eop);
    }
}
 529
/* Loadvm handler: restore one controller.  Field order must mirror
   dma_save.  Returns 0 on success, -EINVAL on version mismatch. */
static int dma_load (QEMUFile *f, void *opaque, int version_id)
{
    struct dma_cont *d = opaque;
    int i;

    if (version_id != 1)
        return -EINVAL;

    /* qemu_get_8s (f, &d->status); */
    qemu_get_8s (f, &d->command);
    qemu_get_8s (f, &d->mask);
    qemu_get_8s (f, &d->flip_flop);
    d->dshift=qemu_get_be32 (f);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        r->now[0]=qemu_get_be32 (f);
        r->now[1]=qemu_get_be32 (f);
        qemu_get_be16s (f, &r->base[0]);
        qemu_get_be16s (f, &r->base[1]);
        qemu_get_8s (f, &r->mode);
        qemu_get_8s (f, &r->page);
        qemu_get_8s (f, &r->pageh);
        qemu_get_8s (f, &r->dack);
        qemu_get_8s (f, &r->eop);
    }

    /* Restart any transfer that was runnable when the state was saved. */
    DMA_run();

    return 0;
}
 561
 562void DMA_init (int high_page_enable)
 563{
 564    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
 565              high_page_enable ? 0x480 : -1);
 566    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
 567              high_page_enable ? 0x488 : -1);
 568    register_savevm ("dma", 0, 1, dma_save, dma_load, &dma_controllers[0]);
 569    register_savevm ("dma", 1, 1, dma_save, dma_load, &dma_controllers[1]);
 570
 571    dma_bh = qemu_bh_new(DMA_run_bh, NULL);
 572}
 573