/* qemu/hw/dma/xilinx-zdma.c */
   1/*
   2 * QEMU model of the ZDMA DMA designed to serve Display Port
   3 *
   4 * Copyright (c) 2014 Xilinx Inc.
   5 *
   6 * Partially autogenerated by xregqemu.py 2014-07-21.
   7 * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>
   8 *
   9 * Permission is hereby granted, free of charge, to any person obtaining a copy
  10 * of this software and associated documentation files (the "Software"), to deal
  11 * in the Software without restriction, including without limitation the rights
  12 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  13 * copies of the Software, and to permit persons to whom the Software is
  14 * furnished to do so, subject to the following conditions:
  15 *
  16 * The above copyright notice and this permission notice shall be included in
  17 * all copies or substantial portions of the Software.
  18 *
  19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  24 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  25 * THE SOFTWARE.
  26 */
  27
  28#include "qemu/osdep.h"
  29#include "hw/sysbus.h"
  30#include "hw/register-dep.h"
  31#include "qemu/bitops.h"
  32#include "qemu/log.h"
  33#include "qapi/error.h"
  34#include "sysemu/dma.h"
  35
  36#ifndef XILINX_ZDMA_ERR_DEBUG
  37#define XILINX_ZDMA_ERR_DEBUG 0
  38#endif
  39
#define TYPE_XILINX_ZDMA "xlnx.zdma"

/* QOM cast helper: convert an Object* to the ZDMA state struct. */
#define XILINX_ZDMA(obj) \
     OBJECT_CHECK(ZDMA, (obj), TYPE_XILINX_ZDMA)

/* Interrupt status bit layout shared by the ISR/IMR/IEN/IDS registers.
 * REG is pasted into the generated field names, so the same 12 bits are
 * declared once per register (e.g. ZDMA_CH_ISR_DMA_DONE).
 */
#define ZDMA_INT_BF(REG) \
    DEP_FIELD(ZDMA_CH_ ## REG, DMA_PAUSE, 1, 11)       \
    DEP_FIELD(ZDMA_CH_ ## REG, DMA_DONE, 1, 10)        \
    DEP_FIELD(ZDMA_CH_ ## REG, AXI_WR_DATA, 1, 9)      \
    DEP_FIELD(ZDMA_CH_ ## REG, AXI_RD_DATA, 1, 8)      \
    DEP_FIELD(ZDMA_CH_ ## REG, AXI_RD_DST_DSCR, 1, 7)  \
    DEP_FIELD(ZDMA_CH_ ## REG, AXI_RD_SRC_DSCR, 1, 6)  \
    DEP_FIELD(ZDMA_CH_ ## REG, IRQ_DST_ACCT_ERR, 1, 5) \
    DEP_FIELD(ZDMA_CH_ ## REG, IRQ_SRC_ACCT_ERR, 1, 4) \
    DEP_FIELD(ZDMA_CH_ ## REG, BYTE_CNT_OVRFL, 1, 3)   \
    DEP_FIELD(ZDMA_CH_ ## REG, DST_DSCR_DONE, 1, 2)    \
    DEP_FIELD(ZDMA_CH_ ## REG, SRC_DSCR_DONE, 1, 1)    \
    DEP_FIELD(ZDMA_CH_ ## REG, INV_APB, 1, 0)
  58
/* Register map for one ZDMA channel.  Offsets are relative to the channel
 * base address; DEP_REG32/DEP_FIELD generate the R_* (word index), A_*
 * (byte offset) and field shift/mask constants used throughout this file.
 */
DEP_REG32(ZDMA_ERR_CTRL, 0x0)
    DEP_FIELD(ZDMA_ERR_CTRL, APB_ERR_RES, 1, 0)
DEP_REG32(ZDMA_CH_ECO, 0x4)
DEP_REG32(ZDMA_CH_ISR, 0x100)
    ZDMA_INT_BF(ISR)
DEP_REG32(ZDMA_CH_IMR, 0x104)
    ZDMA_INT_BF(IMR)
DEP_REG32(ZDMA_CH_IEN, 0x108)
    ZDMA_INT_BF(IEN)
DEP_REG32(ZDMA_CH_IDS, 0x10c)
    ZDMA_INT_BF(IDS)
DEP_REG32(ZDMA_CH_CTRL0, 0x110)
    DEP_FIELD(ZDMA_CH_CTRL0, OVR_FETCH, 1, 7)
    DEP_FIELD(ZDMA_CH_CTRL0, POINT_TYPE, 1, 6)
    DEP_FIELD(ZDMA_CH_CTRL0, MODE, 2, 4)
    DEP_FIELD(ZDMA_CH_CTRL0, RATE_CNTL, 1, 3)
    DEP_FIELD(ZDMA_CH_CTRL0, CONT_ADDR, 1, 2)
    DEP_FIELD(ZDMA_CH_CTRL0, CONT, 1, 1)
DEP_REG32(ZDMA_CH_CTRL1, 0x114)
    DEP_FIELD(ZDMA_CH_CTRL1, DST_ISSUE, 5, 5)
    DEP_FIELD(ZDMA_CH_CTRL1, SRC_ISSUE, 5, 0)
DEP_REG32(ZDMA_CH_PERIF, 0x118)
    DEP_FIELD(ZDMA_CH_PERIF, PROG_CELL_CNT, 4, 2)
    DEP_FIELD(ZDMA_CH_PERIF, SIDE, 1, 1)
    DEP_FIELD(ZDMA_CH_PERIF, EN, 1, 0)
DEP_REG32(ZDMA_CH_STATUS, 0x11c)
    DEP_FIELD(ZDMA_CH_STATUS, STATE, 2, 0)
DEP_REG32(ZDMA_CH_DATA_ATTR, 0x120)
    DEP_FIELD(ZDMA_CH_DATA_ATTR, ARBURST, 2, 26)
    DEP_FIELD(ZDMA_CH_DATA_ATTR, ARCACHE, 4, 22)
    DEP_FIELD(ZDMA_CH_DATA_ATTR, ARQOS, 4, 18)
    DEP_FIELD(ZDMA_CH_DATA_ATTR, ARLEN, 4, 14)
    DEP_FIELD(ZDMA_CH_DATA_ATTR, AWBURST, 2, 12)
    DEP_FIELD(ZDMA_CH_DATA_ATTR, AWCACHE, 4, 8)
    DEP_FIELD(ZDMA_CH_DATA_ATTR, AWQOS, 4, 4)
    DEP_FIELD(ZDMA_CH_DATA_ATTR, AWLEN, 4, 0)
DEP_REG32(ZDMA_CH_DSCR_ATTR, 0x124)
    DEP_FIELD(ZDMA_CH_DSCR_ATTR, AXCOHRNT, 1, 8)
    DEP_FIELD(ZDMA_CH_DSCR_ATTR, AXCACHE, 4, 4)
    DEP_FIELD(ZDMA_CH_DSCR_ATTR, AXQOS, 4, 0)
/* SRC/DST descriptor register views (used in simple/register mode). */
DEP_REG32(ZDMA_CH_SRC_DSCR_WORD0, 0x128)
DEP_REG32(ZDMA_CH_SRC_DSCR_WORD1, 0x12c)
    DEP_FIELD(ZDMA_CH_SRC_DSCR_WORD1, MSB, 17, 0)
DEP_REG32(ZDMA_CH_SRC_DSCR_WORD2, 0x130)
    DEP_FIELD(ZDMA_CH_SRC_DSCR_WORD2, SIZE, 30, 0)
DEP_REG32(ZDMA_CH_SRC_DSCR_WORD3, 0x134)
    DEP_FIELD(ZDMA_CH_SRC_DSCR_WORD3, CMD, 2, 3)
    DEP_FIELD(ZDMA_CH_SRC_DSCR_WORD3, INTR, 1, 2)
    DEP_FIELD(ZDMA_CH_SRC_DSCR_WORD3, TYPE, 1, 1)
    DEP_FIELD(ZDMA_CH_SRC_DSCR_WORD3, COHRNT, 1, 0)
DEP_REG32(ZDMA_CH_DST_DSCR_WORD0, 0x138)
DEP_REG32(ZDMA_CH_DST_DSCR_WORD1, 0x13c)
    DEP_FIELD(ZDMA_CH_DST_DSCR_WORD1, MSB, 17, 0)
DEP_REG32(ZDMA_CH_DST_DSCR_WORD2, 0x140)
    DEP_FIELD(ZDMA_CH_DST_DSCR_WORD2, SIZE, 30, 0)
DEP_REG32(ZDMA_CH_DST_DSCR_WORD3, 0x144)
    DEP_FIELD(ZDMA_CH_DST_DSCR_WORD3, INTR, 1, 2)
    DEP_FIELD(ZDMA_CH_DST_DSCR_WORD3, TYPE, 1, 1)
    DEP_FIELD(ZDMA_CH_DST_DSCR_WORD3, COHRNT, 1, 0)
/* Payload pattern pushed repeatedly in write-only mode. */
DEP_REG32(ZDMA_CH_WR_ONLY_WORD0, 0x148)
DEP_REG32(ZDMA_CH_WR_ONLY_WORD1, 0x14c)
DEP_REG32(ZDMA_CH_WR_ONLY_WORD2, 0x150)
DEP_REG32(ZDMA_CH_WR_ONLY_WORD3, 0x154)
DEP_REG32(ZDMA_CH_SRC_START_LSB, 0x158)
DEP_REG32(ZDMA_CH_SRC_START_MSB, 0x15c)
    DEP_FIELD(ZDMA_CH_SRC_START_MSB, ADDR, 17, 0)
DEP_REG32(ZDMA_CH_DST_START_LSB, 0x160)
DEP_REG32(ZDMA_CH_DST_START_MSB, 0x164)
    DEP_FIELD(ZDMA_CH_DST_START_MSB, ADDR, 17, 0)
DEP_REG32(ZDMA_CH_SRC_CUR_PYLD_LSB, 0x168)
DEP_REG32(ZDMA_CH_SRC_CUR_PYLD_MSB, 0x16c)
    DEP_FIELD(ZDMA_CH_SRC_CUR_PYLD_MSB, ADDR, 17, 0)
DEP_REG32(ZDMA_CH_DST_CUR_PYLD_LSB, 0x170)
DEP_REG32(ZDMA_CH_DST_CUR_PYLD_MSB, 0x174)
    DEP_FIELD(ZDMA_CH_DST_CUR_PYLD_MSB, ADDR, 17, 0)
DEP_REG32(ZDMA_CH_SRC_CUR_DSCR_LSB, 0x178)
DEP_REG32(ZDMA_CH_SRC_CUR_DSCR_MSB, 0x17c)
    DEP_FIELD(ZDMA_CH_SRC_CUR_DSCR_MSB, ADDR, 17, 0)
DEP_REG32(ZDMA_CH_DST_CUR_DSCR_LSB, 0x180)
DEP_REG32(ZDMA_CH_DST_CUR_DSCR_MSB, 0x184)
    DEP_FIELD(ZDMA_CH_DST_CUR_DSCR_MSB, ADDR, 17, 0)
DEP_REG32(ZDMA_CH_TOTAL_BYTE, 0x188)
DEP_REG32(ZDMA_CH_RATE_CNTL, 0x18c)
    DEP_FIELD(ZDMA_CH_RATE_CNTL, CNT, 12, 0)
DEP_REG32(ZDMA_CH_IRQ_SRC_ACCT, 0x190)
    DEP_FIELD(ZDMA_CH_IRQ_SRC_ACCT, CNT, 8, 0)
DEP_REG32(ZDMA_CH_IRQ_DST_ACCT, 0x194)
    DEP_FIELD(ZDMA_CH_IRQ_DST_ACCT, CNT, 8, 0)
DEP_REG32(ZDMA_CH_DBG0, 0x198)
    DEP_FIELD(ZDMA_CH_DBG0, CMN_BUF_FREE, 9, 0)
DEP_REG32(ZDMA_CH_DBG1, 0x19c)
    DEP_FIELD(ZDMA_CH_DBG1, CMN_BUF_OCC, 9, 0)
DEP_REG32(ZDMA_CH_CTRL2, 0x200)
    DEP_FIELD(ZDMA_CH_CTRL2, EN, 1, 0)
 153
/* Number of 32-bit registers; CTRL2 is the highest-offset register. */
#define R_MAX (R_ZDMA_CH_CTRL2 + 1)

/* Channel state, as reported in ZDMA_CH_STATUS.STATE (an error condition
 * is additionally reported as state 3 by zdma_set_state()).
 */
typedef enum {
    DISABLED = 0,
    ENABLED = 1,
    PAUSED = 2,
} ZDMAState;

/* ZDMA_CH_CTRL0.POINT_TYPE: descriptors come from registers or memory. */
enum {
    PT_REG = 0,
    PT_MEM = 1,
};

/* SRC descriptor WORD3.CMD values acted on after a descriptor completes. */
enum {
    CMD_HALT = 1,
    CMD_STOP = 2,
};

/* ZDMA_CH_CTRL0.MODE: read+write, write-only or read-only transfers. */
enum {
    RW_MODE_RW = 0,
    RW_MODE_WO = 1,
    RW_MODE_RO = 2,
};

/* Descriptor WORD3.TYPE: contiguous array vs linked list of descriptors. */
enum {
    DTYPE_LINEAR = 0,
    DTYPE_LINKED = 1,
};

/* AXI burst types (ARBURST/AWBURST fields of ZDMA_CH_DATA_ATTR). */
enum {
    AXI_BURST_FIXED = 0,
    AXI_BURST_INCR  = 1,
};

/* In-memory/in-register descriptor layout: 64-bit address, 32-bit size,
 * 32-bit attribute word.  The words[] view aliases the same 16 bytes so
 * descriptors can be copied to/from the register file verbatim.
 */
typedef union {
    struct {
        uint64_t addr;
        uint32_t size;
        uint32_t attr;
    };
    uint32_t words[4];
} ZDMADescr;
 196
/* Device state for one ZDMA channel. */
typedef struct ZDMA {
    SysBusDevice parent_obj;
    MemoryRegion iomem;            /* MMIO register window */
    MemTxAttrs *attr;              /* transaction attributes ("memattr" link) */
    MemoryRegion *dma_mr;          /* optional "dma" link; NULL = system memory */
    AddressSpace *dma_as;          /* address space DMA accesses go through */
    qemu_irq irq_zdma_ch0;         /* channel interrupt line */

    struct {
        uint32_t bus_width;        /* data bus width in bits ("bus-width" prop) */
    } cfg;

    ZDMAState state;               /* mirrored into ZDMA_CH_STATUS.STATE */
    bool error;                    /* latched error; reported as state 3 */

    /* Buffered copies of the current SRC/DST descriptors. */
    ZDMADescr dsc_src;
    ZDMADescr dsc_dst;

    uint32_t regs[R_MAX];
    DepRegisterInfo regs_info[R_MAX];

    /* We don't model the common bufs. Must be at least 16 bytes
       to model write only mode.  */
    uint8_t buf[2048];
} ZDMA;
 222
 223static void zdma_ch_update_irq(ZDMA *s)
 224{
 225    bool pending;
 226
 227    pending = s->regs[R_ZDMA_CH_ISR] & ~s->regs[R_ZDMA_CH_IMR];
 228
 229    qemu_set_irq(s->irq_zdma_ch0, pending);
 230}
 231
 232static void zdma_ch_isr_postw(DepRegisterInfo *reg, uint64_t val64)
 233{
 234    ZDMA *s = XILINX_ZDMA(reg->opaque);
 235    zdma_ch_update_irq(s);
 236}
 237
 238static uint64_t zdma_ch_ien_prew(DepRegisterInfo *reg, uint64_t val64)
 239{
 240    ZDMA *s = XILINX_ZDMA(reg->opaque);
 241    uint32_t val = val64;
 242
 243    s->regs[R_ZDMA_CH_IMR] &= ~val;
 244    zdma_ch_update_irq(s);
 245    return 0;
 246}
 247
 248static uint64_t zdma_ch_ids_prew(DepRegisterInfo *reg, uint64_t val64)
 249{
 250    ZDMA *s = XILINX_ZDMA(reg->opaque);
 251    uint32_t val = val64;
 252
 253    s->regs[R_ZDMA_CH_IMR] |= val;
 254    zdma_ch_update_irq(s);
 255    return 0;
 256}
 257
 258static void zdma_set_state(ZDMA *s, ZDMAState state)
 259{
 260    s->state = state;
 261    DEP_AF_DP32(s->regs, ZDMA_CH_STATUS, STATE, state);
 262
 263    /* Signal error if we have an error condition.  */
 264    if (s->error) {
 265        DEP_AF_DP32(s->regs, ZDMA_CH_STATUS, STATE, 3);
 266    }
 267}
 268
/* FIXME: Glue() - zdma_src_done()/zdma_dst_done() are identical except
 * for the SRC/DST register names and could be generated from a template.
 */
/* Account a completed SRC descriptor: bump the SRC IRQ accounting
 * counter, raise SRC_DSCR_DONE and re-evaluate the IRQ line.
 */
static void zdma_src_done(ZDMA *s)
{
    unsigned int cnt;
    cnt = DEP_AF_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT);
    cnt++;
    DEP_AF_DP32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT, cnt);
    DEP_AF_DP32(s->regs, ZDMA_CH_ISR, SRC_DSCR_DONE, true);

    /* Did we overflow?  CNT is only 8 bits wide, so if the incremented
     * value does not read back unchanged the counter wrapped and the
     * accounting-error interrupt is raised.
     */
    if (cnt != DEP_AF_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT)) {
        DEP_AF_DP32(s->regs, ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, true);
    }
    zdma_ch_update_irq(s);
}
 284
/* Account a completed DST descriptor: bump the DST IRQ accounting
 * counter, raise DST_DSCR_DONE and re-evaluate the IRQ line.
 */
static void zdma_dst_done(ZDMA *s)
{
    unsigned int cnt;
    cnt = DEP_AF_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT);
    cnt++;
    DEP_AF_DP32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT, cnt);
    DEP_AF_DP32(s->regs, ZDMA_CH_ISR, DST_DSCR_DONE, true);

    /* Did we overflow?  CNT is only 8 bits wide, so if the incremented
     * value does not read back unchanged the counter wrapped and the
     * accounting-error interrupt is raised.
     */
    if (cnt != DEP_AF_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT)) {
        DEP_AF_DP32(s->regs, ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, true);
    }
    zdma_ch_update_irq(s);
}
 299
 300static uint64_t zdma_get_regaddr64(ZDMA *s, unsigned int basereg)
 301{
 302    uint64_t addr;
 303
 304    addr = s->regs[basereg + 1];
 305    addr <<= 32;
 306    addr |= s->regs[basereg];
 307
 308    return addr;
 309}
 310
 311static void zdma_put_regaddr64(ZDMA *s, unsigned int basereg, uint64_t addr)
 312{
 313    s->regs[basereg] = addr;
 314    s->regs[basereg + 1] = addr >> 32;
 315}
 316
 317static bool zdma_load_descriptor(ZDMA *s, uint64_t addr, void *buf)
 318{
 319    /* ZDMA descriptors must be aligned to their own size.  */
 320    if (addr % sizeof(ZDMADescr)) {
 321        qemu_log_mask(LOG_GUEST_ERROR,
 322                      "zdma: unaligned descriptor at %" PRIx64,
 323                      addr);
 324        memset(buf, 0xdeadbeef, sizeof(ZDMADescr));
 325        s->error = true;
 326        return false;
 327    }
 328
 329    /* Load descriptors. FIXME: handle endiannes conversion.  */
 330    address_space_rw(s->dma_as, addr, *s->attr, buf, sizeof(ZDMADescr), false);
 331    return true;
 332}
 333
 334static void zdma_load_src_descriptor(ZDMA *s)
 335{
 336    uint64_t src_addr;
 337    unsigned int ptype = DEP_AF_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
 338
 339    if (ptype == PT_REG) {
 340        memcpy(&s->dsc_src, &s->regs[R_ZDMA_CH_SRC_DSCR_WORD0],
 341               sizeof(s->dsc_src));
 342        return;
 343    }
 344
 345    src_addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB);
 346
 347    if (!zdma_load_descriptor(s, src_addr, &s->dsc_src)) {
 348        DEP_AF_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_SRC_DSCR, true);
 349    }
 350}
 351
 352static void zdma_load_dst_descriptor(ZDMA *s)
 353{
 354    uint64_t dst_addr;
 355    unsigned int ptype = DEP_AF_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
 356
 357    if (ptype == PT_REG) {
 358        memcpy(&s->dsc_dst, &s->regs[R_ZDMA_CH_DST_DSCR_WORD0],
 359               sizeof(s->dsc_src));
 360        return;
 361    }
 362
 363    dst_addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB);
 364
 365    if (!zdma_load_descriptor(s, dst_addr, &s->dsc_dst)) {
 366        DEP_AF_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_DST_DSCR, true);
 367    }
 368}
 369
 370static uint64_t zdma_update_descr_addr(ZDMA *s, bool type, unsigned int basereg)
 371{
 372    uint64_t addr, next;
 373
 374    if (type == DTYPE_LINEAR) {
 375        next = zdma_get_regaddr64(s, basereg);
 376        next += sizeof(s->dsc_dst);
 377        zdma_put_regaddr64(s, basereg, next);
 378    } else {
 379        addr = zdma_get_regaddr64(s, basereg);
 380        addr += sizeof(s->dsc_dst);
 381        address_space_rw(s->dma_as, addr, *s->attr, (void *) &next, 8, false);
 382        zdma_put_regaddr64(s, basereg, next);
 383    }
 384    return next;
 385}
 386
/* Push len bytes from buf out to the destination described by the
 * buffered DST descriptor, walking to further DST descriptors as each
 * one fills up.  The remaining size is written back into the buffered
 * descriptor (words[2]) so state persists across calls.
 */
static void zdma_write_dst(ZDMA *s, uint8_t *buf, uint32_t len)
{
    uint32_t dst_size, dlen;
    bool dst_intr, dst_type;
    unsigned int ptype = DEP_AF_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
    unsigned int rw_mode = DEP_AF_EX32(s->regs, ZDMA_CH_CTRL0, MODE);
    unsigned int burst_type = DEP_AF_EX32(s->regs, ZDMA_CH_DATA_ATTR, AWBURST);

    /* FIXED burst types are only supported in simple dma mode.  */
    if (ptype != PT_REG) {
        burst_type = AXI_BURST_INCR;
    }

    while (len) {
        dst_size = DEP_F_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2, SIZE);
        dst_type = DEP_F_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3, TYPE);
        /* Current DST descriptor exhausted: fetch the next one. */
        if (dst_size == 0 && ptype == PT_MEM) {
            uint64_t next;
            next = zdma_update_descr_addr(s, dst_type, R_ZDMA_CH_DST_CUR_DSCR_LSB);
            zdma_load_descriptor(s, next, &s->dsc_dst);
            dst_size = DEP_F_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2, SIZE);
            dst_type = DEP_F_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3, TYPE);
        }

        /* Match what hardware does by ignoring the dst_size and only using
         * the src size for Simple register mode.  */
        if (ptype == PT_REG && rw_mode != RW_MODE_WO) {
            dst_size = len;
        }

        if (dst_size == 0) {
            qemu_log("Can't write to dst\n");
        }
        dst_intr = DEP_F_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3, INTR);

        /* Write at most the descriptor's remaining size; FIXED bursts
         * additionally cap each transfer at one bus beat.
         */
        dlen = len > dst_size ? dst_size : len;
        if (burst_type == AXI_BURST_FIXED) {
            if (dlen > (s->cfg.bus_width / 8)) {
                dlen = s->cfg.bus_width / 8;
            }
        }

        address_space_rw(s->dma_as, s->dsc_dst.addr, *s->attr, buf, dlen,
                         true);
        /* INCR bursts advance the address; FIXED bursts re-write the
         * same location.
         */
        if (burst_type == AXI_BURST_INCR) {
            s->dsc_dst.addr += dlen;
        }
        dst_size -= dlen;
        buf += dlen;
        len -= dlen;

        if (dst_size == 0 && dst_intr) {
            zdma_dst_done(s);
        }

        /* Write back to buffered descriptor.  */
        s->dsc_dst.words[2] = DEP_F_DP32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2, SIZE, dst_size);
    }
}
 446
/* Execute the transfer described by the buffered SRC descriptor:
 * stream its payload through s->buf to the destination side, raise the
 * completion interrupts, and act on the descriptor's CMD field
 * (stop/halt/continue) afterwards.
 */
static void zdma_process_descr(ZDMA *s)
{
    uint64_t src_addr;
    uint32_t src_size, len;
    unsigned int src_cmd;
    bool src_intr, src_type;
    unsigned int ptype = DEP_AF_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
    unsigned int rw_mode = DEP_AF_EX32(s->regs, ZDMA_CH_CTRL0, MODE);
    unsigned int burst_type = DEP_AF_EX32(s->regs, ZDMA_CH_DATA_ATTR, ARBURST);

    src_addr = s->dsc_src.addr;
    src_size = DEP_F_EX32(s->dsc_src.words[2], ZDMA_CH_SRC_DSCR_WORD2, SIZE);
    src_cmd = DEP_F_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, CMD);
    src_type = DEP_F_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, TYPE);
    src_intr = DEP_F_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, INTR);

    /* FIXED burst types and non-rw modes are only supported in
     * simple dma mode.
     */
    if (ptype != PT_REG) {
        if (rw_mode != RW_MODE_RW) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "zDMA: rw-mode=%d but not simple DMA mode.\n",
                          rw_mode);
        }
        if (burst_type != AXI_BURST_INCR) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "zDMA: burst_type=%d but not simple DMA mode.\n",
                          burst_type);
        }
        burst_type = AXI_BURST_INCR;
        rw_mode = RW_MODE_RW;
    }

    if (rw_mode == RW_MODE_WO) {
        /* In Simple DMA Write-Only, we need to push DST size bytes
         * regardless of what SRC size is set to.  */
        src_size = DEP_F_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2, SIZE);
        /* The repeated payload comes from the WR_ONLY registers. */
        memcpy(s->buf, &s->regs[R_ZDMA_CH_WR_ONLY_WORD0], s->cfg.bus_width / 8);
    }

    /* Move the payload in s->buf sized chunks. */
    while (src_size) {
        len = src_size > ARRAY_SIZE(s->buf) ? ARRAY_SIZE(s->buf) : src_size;
        /* FIXED bursts transfer at most one bus beat per iteration. */
        if (burst_type == AXI_BURST_FIXED) {
            if (len > (s->cfg.bus_width / 8)) {
                len = s->cfg.bus_width / 8;
            }
        }

        if (rw_mode == RW_MODE_WO) {
            /* Write-only: no source read; replay the prepared pattern
             * one bus beat at a time.
             */
            if (len > s->cfg.bus_width / 8) {
                len = s->cfg.bus_width / 8;
            }
        } else {
            address_space_rw(s->dma_as, src_addr, *s->attr, s->buf, len,
                             false);
            if (burst_type == AXI_BURST_INCR) {
                src_addr += len;
            }
        }

        if (rw_mode != RW_MODE_RO) {
            zdma_write_dst(s, s->buf, len);
        }

        s->regs[R_ZDMA_CH_TOTAL_BYTE] += len;
        src_size -= len;

        if (src_size == 0) {
            DEP_AF_DP32(s->regs, ZDMA_CH_ISR, DMA_DONE, true);
        }

    }

    if (src_intr) {
        zdma_src_done(s);
    }

    /* Load next descriptor.  */
    if (ptype == PT_REG || src_cmd == CMD_STOP) {
        /* Simple mode and STOP both disable the channel. */
        DEP_AF_DP32(s->regs, ZDMA_CH_CTRL2, EN, 0);
        zdma_set_state(s, DISABLED);
        return;
    }

    if (src_cmd == CMD_HALT) {
        zdma_set_state(s, PAUSED);
        DEP_AF_DP32(s->regs, ZDMA_CH_ISR, DMA_PAUSE, 1);
        zdma_ch_update_irq(s);
        return;
    }

    zdma_update_descr_addr(s, src_type, R_ZDMA_CH_SRC_CUR_DSCR_LSB);
}
 541
 542static void zdma_run(ZDMA *s)
 543{
 544    while (s->state == ENABLED && !s->error) {
 545        zdma_load_src_descriptor(s);
 546
 547        if (s->error) {
 548            zdma_set_state(s, DISABLED);
 549        } else {
 550            zdma_process_descr(s);
 551        }
 552    }
 553
 554    zdma_ch_update_irq(s);
 555}
 556
 557static void zdma_update_descr_addr_from_start(ZDMA *s)
 558{
 559    uint64_t src_addr, dst_addr;
 560
 561    src_addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_START_LSB);
 562    zdma_put_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB, src_addr);
 563    dst_addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_START_LSB);
 564    zdma_put_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB, dst_addr);
 565    zdma_load_dst_descriptor(s);
 566}
 567
/* Post-write hook shared by CTRL0 and CTRL2: (re)start, continue or
 * disable the channel according to CTRL2.EN and the pause/continue
 * bits, then run the processing loop.
 */
static void zdma_ch_ctrlx_postw(DepRegisterInfo *reg, uint64_t val64)
{
    ZDMA *s = XILINX_ZDMA(reg->opaque);

    if (DEP_AF_EX32(s->regs, ZDMA_CH_CTRL2, EN)) {
        /* Enabling clears any previously latched error. */
        s->error = false;

        if (s->state == PAUSED && DEP_AF_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) {
            /* Continue from pause: either restart from the START
             * registers (CONT_ADDR set) or step past the halted
             * descriptor.
             */
            if (DEP_AF_EX32(s->regs, ZDMA_CH_CTRL0, CONT_ADDR) == 1) {
                zdma_update_descr_addr_from_start(s);
            } else {
                /* FIXME: Not sure if this is right. With asymetric descriptor
                   setups, we rely on reusing the DST descriptor buffer.  */
                bool src_type = DEP_F_EX32(s->dsc_src.words[3],
                                       ZDMA_CH_SRC_DSCR_WORD3, TYPE);
                zdma_update_descr_addr(s, src_type,
                                          R_ZDMA_CH_SRC_CUR_DSCR_LSB);
            }
        } else {
            /* Fresh start: load pointers from the START registers. */
            zdma_update_descr_addr_from_start(s);
        }
        zdma_set_state(s, ENABLED);
    } else {
        /* Leave Paused state?  */
        if (s->state == PAUSED && DEP_AF_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) {
            zdma_set_state(s, DISABLED);
        }
    }

    zdma_run(s);
}
 599
/* Per-register access metadata: reset values, reserved (rsvd), read-only
 * (ro), write-one-to-clear (w1c) and clear-on-read (cor) masks, plus the
 * pre/post write hooks that implement the side effects modeled above.
 */
static DepRegisterAccessInfo zdma_regs_info[] = {
    {   .name = "ZDMA_ERR_CTRL",  .decode.addr = A_ZDMA_ERR_CTRL,
        .rsvd = 0xfffffffe,
    },{ .name = "ZDMA_CH_ECO",  .decode.addr = A_ZDMA_CH_ECO,
    },{ .name = "ZDMA_CH_ISR",  .decode.addr = A_ZDMA_CH_ISR,
        .rsvd = 0xfffff000,
        .w1c = 0xfff,
        .post_write = zdma_ch_isr_postw,
    },{ .name = "ZDMA_CH_IMR",  .decode.addr = A_ZDMA_CH_IMR,
        .reset = 0xfff,
        .rsvd = 0xfffff000,
        .ro = 0xfff,
    },{ .name = "ZDMA_CH_IEN",  .decode.addr = A_ZDMA_CH_IEN,
        .rsvd = 0xfffff000,
        .pre_write = zdma_ch_ien_prew,
    },{ .name = "ZDMA_CH_IDS",  .decode.addr = A_ZDMA_CH_IDS,
        .rsvd = 0xfffff000,
        .pre_write = zdma_ch_ids_prew,
    },{ .name = "ZDMA_CH_CTRL0",  .decode.addr = A_ZDMA_CH_CTRL0,
        .reset = 0x80,
        .rsvd = 0xffffff01,
        .post_write = zdma_ch_ctrlx_postw,
    },{ .name = "ZDMA_CH_CTRL1",  .decode.addr = A_ZDMA_CH_CTRL1,
        .reset = 0x3ff,
        .rsvd = 0xfffffc00,
    },{ .name = "ZDMA_CH_PERIF",  .decode.addr = A_ZDMA_CH_PERIF,
        .rsvd = 0xffffffc0,
    },{ .name = "ZDMA_CH_STATUS",  .decode.addr = A_ZDMA_CH_STATUS,
        .rsvd = 0xfffffffc,
        .ro = 0x3,
    },{ .name = "ZDMA_CH_DATA_ATTR",  .decode.addr = A_ZDMA_CH_DATA_ATTR,
        .reset = 0x483d20f,
        .rsvd = 0xf0000000,
    },{ .name = "ZDMA_CH_DSCR_ATTR",  .decode.addr = A_ZDMA_CH_DSCR_ATTR,
        .rsvd = 0xfffffe00,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD0",  .decode.addr = A_ZDMA_CH_SRC_DSCR_WORD0,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD1",  .decode.addr = A_ZDMA_CH_SRC_DSCR_WORD1,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD2",  .decode.addr = A_ZDMA_CH_SRC_DSCR_WORD2,
        .rsvd = 0xc0000000,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD3",  .decode.addr = A_ZDMA_CH_SRC_DSCR_WORD3,
        .rsvd = 0xffffffe0,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD0",  .decode.addr = A_ZDMA_CH_DST_DSCR_WORD0,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD1",  .decode.addr = A_ZDMA_CH_DST_DSCR_WORD1,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD2",  .decode.addr = A_ZDMA_CH_DST_DSCR_WORD2,
        .rsvd = 0xc0000000,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD3",  .decode.addr = A_ZDMA_CH_DST_DSCR_WORD3,
        .rsvd = 0xfffffffa,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD0",  .decode.addr = A_ZDMA_CH_WR_ONLY_WORD0,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD1",  .decode.addr = A_ZDMA_CH_WR_ONLY_WORD1,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD2",  .decode.addr = A_ZDMA_CH_WR_ONLY_WORD2,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD3",  .decode.addr = A_ZDMA_CH_WR_ONLY_WORD3,
    },{ .name = "ZDMA_CH_SRC_START_LSB",  .decode.addr = A_ZDMA_CH_SRC_START_LSB,
    },{ .name = "ZDMA_CH_SRC_START_MSB",  .decode.addr = A_ZDMA_CH_SRC_START_MSB,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_DST_START_LSB",  .decode.addr = A_ZDMA_CH_DST_START_LSB,
    },{ .name = "ZDMA_CH_DST_START_MSB",  .decode.addr = A_ZDMA_CH_DST_START_MSB,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_SRC_CUR_PYLD_LSB",  .decode.addr = A_ZDMA_CH_SRC_CUR_PYLD_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_SRC_CUR_PYLD_MSB",  .decode.addr = A_ZDMA_CH_SRC_CUR_PYLD_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_DST_CUR_PYLD_LSB",  .decode.addr = A_ZDMA_CH_DST_CUR_PYLD_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_DST_CUR_PYLD_MSB",  .decode.addr = A_ZDMA_CH_DST_CUR_PYLD_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_SRC_CUR_DSCR_LSB",  .decode.addr = A_ZDMA_CH_SRC_CUR_DSCR_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_SRC_CUR_DSCR_MSB",  .decode.addr = A_ZDMA_CH_SRC_CUR_DSCR_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_DST_CUR_DSCR_LSB",  .decode.addr = A_ZDMA_CH_DST_CUR_DSCR_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_DST_CUR_DSCR_MSB",  .decode.addr = A_ZDMA_CH_DST_CUR_DSCR_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_TOTAL_BYTE",  .decode.addr = A_ZDMA_CH_TOTAL_BYTE,
        .w1c = 0xffffffff,
    },{ .name = "ZDMA_CH_RATE_CNTL",  .decode.addr = A_ZDMA_CH_RATE_CNTL,
        .rsvd = 0xfffff000,
    },{ .name = "ZDMA_CH_IRQ_SRC_ACCT",  .decode.addr = A_ZDMA_CH_IRQ_SRC_ACCT,
        .rsvd = 0xffffff00,
        .ro = 0xff,
        .cor = 0xff,
    },{ .name = "ZDMA_CH_IRQ_DST_ACCT",  .decode.addr = A_ZDMA_CH_IRQ_DST_ACCT,
        .rsvd = 0xffffff00,
        .ro = 0xff,
        .cor = 0xff,
    },{ .name = "ZDMA_CH_DBG0",  .decode.addr = A_ZDMA_CH_DBG0,
        .rsvd = 0xfffffe00,
        .ro = 0x1ff,
    },{ .name = "ZDMA_CH_DBG1",  .decode.addr = A_ZDMA_CH_DBG1,
        .rsvd = 0xfffffe00,
        .ro = 0x1ff,
    },{ .name = "ZDMA_CH_CTRL2",  .decode.addr = A_ZDMA_CH_CTRL2,
        .rsvd = 0xfffffffe,
        .post_write = zdma_ch_ctrlx_postw,
    }
};
 702
 703static void zdma_reset(DeviceState *dev)
 704{
 705    ZDMA *s = XILINX_ZDMA(dev);
 706    unsigned int i;
 707
 708    for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
 709        dep_register_reset(&s->regs_info[i]);
 710    }
 711
 712    zdma_ch_update_irq(s);
 713}
 714
 715static uint64_t zdma_read(void *opaque, hwaddr addr, unsigned size)
 716{
 717    ZDMA *s = XILINX_ZDMA(opaque);
 718    DepRegisterInfo *r = &s->regs_info[addr / 4];
 719
 720    if (!r->data) {
 721        qemu_log("%s: Decode error: read from %" HWADDR_PRIx "\n",
 722                 object_get_canonical_path(OBJECT(s)),
 723                 addr);
 724        DEP_AF_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
 725        zdma_ch_update_irq(s);
 726        return 0;
 727    }
 728    return dep_register_read(r);
 729}
 730
 731static void zdma_write(void *opaque, hwaddr addr, uint64_t value,
 732                      unsigned size)
 733{
 734    ZDMA *s = XILINX_ZDMA(opaque);
 735    DepRegisterInfo *r = &s->regs_info[addr / 4];
 736
 737    if (!r->data) {
 738        qemu_log("%s: Decode error: write to %" HWADDR_PRIx "=%" PRIx64 "\n",
 739                 object_get_canonical_path(OBJECT(s)),
 740                 addr, value);
 741        DEP_AF_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
 742        zdma_ch_update_irq(s);
 743        return;
 744    }
 745    dep_register_write(r, value, ~0);
 746}
 747
/* MMIO dispatch: registers are little-endian and only 32-bit wide
 * accesses are accepted.
 */
static const MemoryRegionOps zdma_ops = {
    .read = zdma_read,
    .write = zdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
 757
/* Realize: bind each declared register to its backing storage in
 * s->regs and pick the DMA address space.
 */
static void zdma_realize(DeviceState *dev, Error **errp)
{
    ZDMA *s = XILINX_ZDMA(dev);
    /* Owned string; deliberately kept alive for the device lifetime
     * since every regs_info entry stores it as .prefix.
     */
    const char *prefix = object_get_canonical_path(OBJECT(dev));
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(zdma_regs_info); ++i) {
        DepRegisterInfo *r = &s->regs_info[zdma_regs_info[i].decode.addr/4];

        *r = (DepRegisterInfo) {
            .data = (uint8_t *)&s->regs[
                    zdma_regs_info[i].decode.addr/4],
            .data_size = sizeof(uint32_t),
            .access = &zdma_regs_info[i],
            .debug = XILINX_ZDMA_ERR_DEBUG,
            .prefix = prefix,
            .opaque = s,
        };
    }

    /* DMA through the linked "dma" region if one was set, otherwise
     * through the global system memory address space.
     */
    if (s->dma_mr) {
        s->dma_as = g_malloc0(sizeof(AddressSpace));
        address_space_init(s->dma_as, s->dma_mr, NULL);
    } else {
        s->dma_as = &address_space_memory;
    }
}
 785
/* Instance init: create the MMIO window and IRQ, and expose the "dma"
 * (DMA memory region) and "memattr" (transaction attributes) links.
 */
static void zdma_init(Object *obj)
{
    ZDMA *s = XILINX_ZDMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memory_region_init_io(&s->iomem, obj, &zdma_ops, s,
                          TYPE_XILINX_ZDMA, R_MAX * 4);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq_zdma_ch0);

    object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
                             (Object **)&s->dma_mr,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
    object_property_add_link(obj, "memattr", TYPE_MEMORY_TRANSACTION_ATTR,
                             (Object **)&s->attr,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_UNREF_ON_RELEASE,
                             &error_abort);
}
 807
/* Migration state: only the raw register file is transferred.
 * NOTE(review): s->state, s->error and the buffered descriptors are not
 * migrated — confirm a mid-transfer migration is not expected to resume
 * cleanly.
 */
static const VMStateDescription vmstate_zdma = {
    .name = TYPE_XILINX_ZDMA,
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, ZDMA, R_MAX),
        VMSTATE_END_OF_LIST(),
    }
};
 818
/* User-configurable properties; bus width in bits, default 64. */
static Property zdma_props[] = {
    DEFINE_PROP_UINT32("bus-width", ZDMA, cfg.bus_width, 64),
    DEFINE_PROP_END_OF_LIST(),
};
 823
 824static void zdma_class_init(ObjectClass *klass, void *data)
 825{
 826    DeviceClass *dc = DEVICE_CLASS(klass);
 827
 828    dc->reset = zdma_reset;
 829    dc->realize = zdma_realize;
 830    dc->props = zdma_props;
 831    dc->vmsd = &vmstate_zdma;
 832}
 833
/* QOM type registration data for the ZDMA sysbus device. */
static const TypeInfo zdma_info = {
    .name          = TYPE_XILINX_ZDMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(ZDMA),
    .class_init    = zdma_class_init,
    .instance_init = zdma_init,
};
 841
/* Register the ZDMA device model with the QOM type system. */
static void zdma_register_types(void)
{
    type_register_static(&zdma_info);
}

type_init(zdma_register_types)
 848