qemu/hw/dma/xlnx-zdma.c
/*
 * QEMU model of the ZynqMP generic DMA
 *
 * Copyright (c) 2014 Xilinx Inc.
 * Copyright (c) 2018 FEIMTECH AB
 *
 * Written by Edgar E. Iglesias <edgar.iglesias@xilinx.com>,
 *            Francisco Iglesias <francisco.iglesias@feimtech.se>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "hw/dma/xlnx-zdma.h"
#include "hw/irq.h"
#include "hw/qdev-properties.h"
#include "migration/vmstate.h"
#include "qemu/bitops.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"

#ifndef XLNX_ZDMA_ERR_DEBUG
#define XLNX_ZDMA_ERR_DEBUG 0
#endif

REG32(ZDMA_ERR_CTRL, 0x0)
    FIELD(ZDMA_ERR_CTRL, APB_ERR_RES, 0, 1)
REG32(ZDMA_CH_ISR, 0x100)
    FIELD(ZDMA_CH_ISR, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_ISR, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_ISR, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_ISR, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_ISR, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_ISR, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_ISR, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_ISR, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_ISR, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_ISR, INV_APB, 0, 1)
REG32(ZDMA_CH_IMR, 0x104)
    FIELD(ZDMA_CH_IMR, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_IMR, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_IMR, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_IMR, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_IMR, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_IMR, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_IMR, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_IMR, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_IMR, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_IMR, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_IMR, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_IMR, INV_APB, 0, 1)
REG32(ZDMA_CH_IEN, 0x108)
    FIELD(ZDMA_CH_IEN, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_IEN, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_IEN, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_IEN, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_IEN, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_IEN, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_IEN, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_IEN, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_IEN, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_IEN, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_IEN, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_IEN, INV_APB, 0, 1)
REG32(ZDMA_CH_IDS, 0x10c)
    FIELD(ZDMA_CH_IDS, DMA_PAUSE, 11, 1)
    FIELD(ZDMA_CH_IDS, DMA_DONE, 10, 1)
    FIELD(ZDMA_CH_IDS, AXI_WR_DATA, 9, 1)
    FIELD(ZDMA_CH_IDS, AXI_RD_DATA, 8, 1)
    FIELD(ZDMA_CH_IDS, AXI_RD_DST_DSCR, 7, 1)
    FIELD(ZDMA_CH_IDS, AXI_RD_SRC_DSCR, 6, 1)
    FIELD(ZDMA_CH_IDS, IRQ_DST_ACCT_ERR, 5, 1)
    FIELD(ZDMA_CH_IDS, IRQ_SRC_ACCT_ERR, 4, 1)
    FIELD(ZDMA_CH_IDS, BYTE_CNT_OVRFL, 3, 1)
    FIELD(ZDMA_CH_IDS, DST_DSCR_DONE, 2, 1)
    FIELD(ZDMA_CH_IDS, SRC_DSCR_DONE, 1, 1)
    FIELD(ZDMA_CH_IDS, INV_APB, 0, 1)
REG32(ZDMA_CH_CTRL0, 0x110)
    FIELD(ZDMA_CH_CTRL0, OVR_FETCH, 7, 1)
    FIELD(ZDMA_CH_CTRL0, POINT_TYPE, 6, 1)
    FIELD(ZDMA_CH_CTRL0, MODE, 4, 2)
    FIELD(ZDMA_CH_CTRL0, RATE_CTRL, 3, 1)
    FIELD(ZDMA_CH_CTRL0, CONT_ADDR, 2, 1)
    FIELD(ZDMA_CH_CTRL0, CONT, 1, 1)
REG32(ZDMA_CH_CTRL1, 0x114)
    FIELD(ZDMA_CH_CTRL1, DST_ISSUE, 5, 5)
    FIELD(ZDMA_CH_CTRL1, SRC_ISSUE, 0, 5)
REG32(ZDMA_CH_FCI, 0x118)
    FIELD(ZDMA_CH_FCI, PROG_CELL_CNT, 2, 2)
    FIELD(ZDMA_CH_FCI, SIDE, 1, 1)
    FIELD(ZDMA_CH_FCI, EN, 0, 1)
REG32(ZDMA_CH_STATUS, 0x11c)
    FIELD(ZDMA_CH_STATUS, STATE, 0, 2)
REG32(ZDMA_CH_DATA_ATTR, 0x120)
    FIELD(ZDMA_CH_DATA_ATTR, ARBURST, 26, 2)
    FIELD(ZDMA_CH_DATA_ATTR, ARCACHE, 22, 4)
    FIELD(ZDMA_CH_DATA_ATTR, ARQOS, 18, 4)
    FIELD(ZDMA_CH_DATA_ATTR, ARLEN, 14, 4)
    FIELD(ZDMA_CH_DATA_ATTR, AWBURST, 12, 2)
    FIELD(ZDMA_CH_DATA_ATTR, AWCACHE, 8, 4)
    FIELD(ZDMA_CH_DATA_ATTR, AWQOS, 4, 4)
    FIELD(ZDMA_CH_DATA_ATTR, AWLEN, 0, 4)
REG32(ZDMA_CH_DSCR_ATTR, 0x124)
    FIELD(ZDMA_CH_DSCR_ATTR, AXCOHRNT, 8, 1)
    FIELD(ZDMA_CH_DSCR_ATTR, AXCACHE, 4, 4)
    FIELD(ZDMA_CH_DSCR_ATTR, AXQOS, 0, 4)
REG32(ZDMA_CH_SRC_DSCR_WORD0, 0x128)
REG32(ZDMA_CH_SRC_DSCR_WORD1, 0x12c)
    FIELD(ZDMA_CH_SRC_DSCR_WORD1, MSB, 0, 17)
REG32(ZDMA_CH_SRC_DSCR_WORD2, 0x130)
    FIELD(ZDMA_CH_SRC_DSCR_WORD2, SIZE, 0, 30)
REG32(ZDMA_CH_SRC_DSCR_WORD3, 0x134)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, CMD, 3, 2)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, INTR, 2, 1)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, TYPE, 1, 1)
    FIELD(ZDMA_CH_SRC_DSCR_WORD3, COHRNT, 0, 1)
REG32(ZDMA_CH_DST_DSCR_WORD0, 0x138)
REG32(ZDMA_CH_DST_DSCR_WORD1, 0x13c)
    FIELD(ZDMA_CH_DST_DSCR_WORD1, MSB, 0, 17)
REG32(ZDMA_CH_DST_DSCR_WORD2, 0x140)
    FIELD(ZDMA_CH_DST_DSCR_WORD2, SIZE, 0, 30)
REG32(ZDMA_CH_DST_DSCR_WORD3, 0x144)
    FIELD(ZDMA_CH_DST_DSCR_WORD3, INTR, 2, 1)
    FIELD(ZDMA_CH_DST_DSCR_WORD3, TYPE, 1, 1)
    FIELD(ZDMA_CH_DST_DSCR_WORD3, COHRNT, 0, 1)
REG32(ZDMA_CH_WR_ONLY_WORD0, 0x148)
REG32(ZDMA_CH_WR_ONLY_WORD1, 0x14c)
REG32(ZDMA_CH_WR_ONLY_WORD2, 0x150)
REG32(ZDMA_CH_WR_ONLY_WORD3, 0x154)
REG32(ZDMA_CH_SRC_START_LSB, 0x158)
REG32(ZDMA_CH_SRC_START_MSB, 0x15c)
    FIELD(ZDMA_CH_SRC_START_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_START_LSB, 0x160)
REG32(ZDMA_CH_DST_START_MSB, 0x164)
    FIELD(ZDMA_CH_DST_START_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_RATE_CTRL, 0x18c)
    FIELD(ZDMA_CH_RATE_CTRL, CNT, 0, 12)
REG32(ZDMA_CH_SRC_CUR_PYLD_LSB, 0x168)
REG32(ZDMA_CH_SRC_CUR_PYLD_MSB, 0x16c)
    FIELD(ZDMA_CH_SRC_CUR_PYLD_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_CUR_PYLD_LSB, 0x170)
REG32(ZDMA_CH_DST_CUR_PYLD_MSB, 0x174)
    FIELD(ZDMA_CH_DST_CUR_PYLD_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_SRC_CUR_DSCR_LSB, 0x178)
REG32(ZDMA_CH_SRC_CUR_DSCR_MSB, 0x17c)
    FIELD(ZDMA_CH_SRC_CUR_DSCR_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_DST_CUR_DSCR_LSB, 0x180)
REG32(ZDMA_CH_DST_CUR_DSCR_MSB, 0x184)
    FIELD(ZDMA_CH_DST_CUR_DSCR_MSB, ADDR, 0, 17)
REG32(ZDMA_CH_TOTAL_BYTE, 0x188)
REG32(ZDMA_CH_RATE_CNTL, 0x18c)
    FIELD(ZDMA_CH_RATE_CNTL, CNT, 0, 12)
REG32(ZDMA_CH_IRQ_SRC_ACCT, 0x190)
    FIELD(ZDMA_CH_IRQ_SRC_ACCT, CNT, 0, 8)
REG32(ZDMA_CH_IRQ_DST_ACCT, 0x194)
    FIELD(ZDMA_CH_IRQ_DST_ACCT, CNT, 0, 8)
REG32(ZDMA_CH_DBG0, 0x198)
    FIELD(ZDMA_CH_DBG0, CMN_BUF_FREE, 0, 9)
REG32(ZDMA_CH_DBG1, 0x19c)
    FIELD(ZDMA_CH_DBG1, CMN_BUF_OCC, 0, 9)
REG32(ZDMA_CH_CTRL2, 0x200)
    FIELD(ZDMA_CH_CTRL2, EN, 0, 1)

enum {
    PT_REG = 0,
    PT_MEM = 1,
};

enum {
    CMD_HALT = 1,
    CMD_STOP = 2,
};

enum {
    RW_MODE_RW = 0,
    RW_MODE_WO = 1,
    RW_MODE_RO = 2,
};

enum {
    DTYPE_LINEAR = 0,
    DTYPE_LINKED = 1,
};

enum {
    AXI_BURST_FIXED = 0,
    AXI_BURST_INCR  = 1,
};
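
/*
 * CH_CTRL0.POINT_TYPE selects where descriptors come from: PT_REG uses the
 * SRC/DST_DSCR_WORD0..3 registers directly (simple DMA mode), while PT_MEM
 * fetches linear or linked descriptors from memory (scatter-gather mode).
 * CH_CTRL0.MODE encodes the transfer direction per RW_MODE_* above, and the
 * DATA_ATTR AWBURST/ARBURST fields select the AXI_BURST_* behaviour below.
 */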

static void zdma_ch_imr_update_irq(XlnxZDMA *s)
{
    bool pending;

    pending = s->regs[R_ZDMA_CH_ISR] & ~s->regs[R_ZDMA_CH_IMR];

    qemu_set_irq(s->irq_zdma_ch_imr, pending);
}

static void zdma_ch_isr_postw(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
    zdma_ch_imr_update_irq(s);
}

static uint64_t zdma_ch_ien_prew(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
    uint32_t val = val64;

    s->regs[R_ZDMA_CH_IMR] &= ~val;
    zdma_ch_imr_update_irq(s);
    return 0;
}

static uint64_t zdma_ch_ids_prew(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);
    uint32_t val = val64;

    s->regs[R_ZDMA_CH_IMR] |= val;
    zdma_ch_imr_update_irq(s);
    return 0;
}
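
/*
 * Interrupt plumbing: ISR latches events, IMR masks them, and the output
 * IRQ is asserted while any ISR bit is both set and unmasked (ISR & ~IMR).
 * Writing a bit to IEN clears the corresponding IMR bit (unmask), writing
 * it to IDS sets it (mask), and writing 1 to an ISR bit clears it (W1C).
 * For example, a guest that only wants the transfer-complete interrupt
 * writes DMA_DONE (bit 10) to ZDMA_CH_IEN and later acknowledges it by
 * writing the same bit back to ZDMA_CH_ISR.
 */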

static void zdma_set_state(XlnxZDMA *s, XlnxZDMAState state)
{
    s->state = state;
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_STATUS, STATE, state);

    /* Signal error if we have an error condition.  */
    if (s->error) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_STATUS, STATE, 3);
    }
}

static void zdma_src_done(XlnxZDMA *s)
{
    unsigned int cnt;
    cnt = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT);
    cnt++;
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT, cnt);
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, SRC_DSCR_DONE, true);

    /* Did we overflow?  */
    if (cnt != ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_SRC_ACCT, CNT)) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, IRQ_SRC_ACCT_ERR, true);
    }
    zdma_ch_imr_update_irq(s);
}

static void zdma_dst_done(XlnxZDMA *s)
{
    unsigned int cnt;
    cnt = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT);
    cnt++;
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT, cnt);
    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DST_DSCR_DONE, true);

    /* Did we overflow?  */
    if (cnt != ARRAY_FIELD_EX32(s->regs, ZDMA_CH_IRQ_DST_ACCT, CNT)) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, IRQ_DST_ACCT_ERR, true);
    }
    zdma_ch_imr_update_irq(s);
}

static uint64_t zdma_get_regaddr64(XlnxZDMA *s, unsigned int basereg)
{
    uint64_t addr;

    addr = s->regs[basereg + 1];
    addr <<= 32;
    addr |= s->regs[basereg];

    return addr;
}

static void zdma_put_regaddr64(XlnxZDMA *s, unsigned int basereg, uint64_t addr)
{
    s->regs[basereg] = addr;
    s->regs[basereg + 1] = addr >> 32;
}
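
/*
 * 64-bit addresses are split across LSB/MSB register pairs, LSB first.
 * For example, a descriptor at 0x8_4000_0000 is programmed as
 * ZDMA_CH_SRC_CUR_DSCR_LSB = 0x40000000 and ZDMA_CH_SRC_CUR_DSCR_MSB = 0x8;
 * zdma_get_regaddr64() recombines such a pair and zdma_put_regaddr64()
 * writes an updated address back in the same layout.
 */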

static bool zdma_load_descriptor(XlnxZDMA *s, uint64_t addr, void *buf)
{
    /* ZDMA descriptors must be aligned to their own size.  */
    if (addr % sizeof(XlnxZDMADescr)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "zdma: unaligned descriptor at %" PRIx64,
                      addr);
        memset(buf, 0x0, sizeof(XlnxZDMADescr));
        s->error = true;
        return false;
    }

    address_space_rw(s->dma_as, addr, s->attr,
                     buf, sizeof(XlnxZDMADescr), false);
    return true;
}
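
/*
 * In-memory descriptors use the same layout as the four SRC/DST_DSCR_WORD
 * registers (see XlnxZDMADescr in hw/dma/xlnx-zdma.h): words[0]/words[1]
 * hold the 64-bit buffer address, words[2] the transfer SIZE and words[3]
 * the control bits (CMD, INTR, TYPE, COHRNT). This is why PT_REG mode
 * below can simply memcpy() the register file into the cached
 * dsc_src/dsc_dst copies.
 */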

static void zdma_load_src_descriptor(XlnxZDMA *s)
{
    uint64_t src_addr;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);

    if (ptype == PT_REG) {
        memcpy(&s->dsc_src, &s->regs[R_ZDMA_CH_SRC_DSCR_WORD0],
               sizeof(s->dsc_src));
        return;
    }

    src_addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB);

    if (!zdma_load_descriptor(s, src_addr, &s->dsc_src)) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_SRC_DSCR, true);
    }
}

static void zdma_load_dst_descriptor(XlnxZDMA *s)
{
    uint64_t dst_addr;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);

    if (ptype == PT_REG) {
        memcpy(&s->dsc_dst, &s->regs[R_ZDMA_CH_DST_DSCR_WORD0],
               sizeof(s->dsc_dst));
        return;
    }

    dst_addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB);

    if (!zdma_load_descriptor(s, dst_addr, &s->dsc_dst)) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, AXI_RD_DST_DSCR, true);
    }
}

static uint64_t zdma_update_descr_addr(XlnxZDMA *s, bool type,
                                       unsigned int basereg)
{
    uint64_t addr, next;

    if (type == DTYPE_LINEAR) {
        next = zdma_get_regaddr64(s, basereg);
        next += sizeof(s->dsc_dst);
        zdma_put_regaddr64(s, basereg, next);
    } else {
        addr = zdma_get_regaddr64(s, basereg);
        addr += sizeof(s->dsc_dst);
        address_space_rw(s->dma_as, addr, s->attr, (void *) &next, 8, false);
        zdma_put_regaddr64(s, basereg, next);
    }
    return next;
}
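
/*
 * Descriptor chaining: with DTYPE_LINEAR the next descriptor follows the
 * current one immediately in memory, so the current-descriptor address is
 * just advanced by the descriptor size. With DTYPE_LINKED the descriptor
 * is instead followed by a 64-bit pointer, which is fetched from memory
 * and becomes the new current-descriptor address.
 */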

static void zdma_write_dst(XlnxZDMA *s, uint8_t *buf, uint32_t len)
{
    uint32_t dst_size, dlen;
    bool dst_intr, dst_type;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
    unsigned int rw_mode = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, MODE);
    unsigned int burst_type = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_DATA_ATTR,
                                               AWBURST);

    /* FIXED burst types are only supported in simple dma mode.  */
    if (ptype != PT_REG) {
        burst_type = AXI_BURST_INCR;
    }

    while (len) {
        dst_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                              SIZE);
        dst_type = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
                              TYPE);
        if (dst_size == 0 && ptype == PT_MEM) {
            uint64_t next;
            next = zdma_update_descr_addr(s, dst_type,
                                          R_ZDMA_CH_DST_CUR_DSCR_LSB);
            zdma_load_descriptor(s, next, &s->dsc_dst);
            dst_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                                  SIZE);
            dst_type = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
                                  TYPE);
        }

        /* Match what hardware does by ignoring the dst_size and only using
         * the src size for Simple register mode.  */
        if (ptype == PT_REG && rw_mode != RW_MODE_WO) {
            dst_size = len;
        }

        dst_intr = FIELD_EX32(s->dsc_dst.words[3], ZDMA_CH_DST_DSCR_WORD3,
                              INTR);

        dlen = len > dst_size ? dst_size : len;
        if (burst_type == AXI_BURST_FIXED) {
            if (dlen > (s->cfg.bus_width / 8)) {
                dlen = s->cfg.bus_width / 8;
            }
        }

        address_space_rw(s->dma_as, s->dsc_dst.addr, s->attr, buf, dlen,
                         true);
        if (burst_type == AXI_BURST_INCR) {
            s->dsc_dst.addr += dlen;
        }
        dst_size -= dlen;
        buf += dlen;
        len -= dlen;

        if (dst_size == 0 && dst_intr) {
            zdma_dst_done(s);
        }

        /* Write back to buffered descriptor.  */
        s->dsc_dst.words[2] = FIELD_DP32(s->dsc_dst.words[2],
                                         ZDMA_CH_DST_DSCR_WORD2,
                                         SIZE,
                                         dst_size);
    }
}
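
/*
 * Burst handling above: with AWBURST = INCR the model advances the target
 * address after every chunk, so data lands in consecutive memory. With
 * FIXED bursts each chunk is capped at bus_width/8 bytes (8 bytes for the
 * default 64-bit bus) and the address is left unchanged, modelling repeated
 * writes to the same location, as a device FIFO port would see them.
 */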

static void zdma_process_descr(XlnxZDMA *s)
{
    uint64_t src_addr;
    uint32_t src_size, len;
    unsigned int src_cmd;
    bool src_intr, src_type;
    unsigned int ptype = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, POINT_TYPE);
    unsigned int rw_mode = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, MODE);
    unsigned int burst_type = ARRAY_FIELD_EX32(s->regs, ZDMA_CH_DATA_ATTR,
                                               ARBURST);

    src_addr = s->dsc_src.addr;
    src_size = FIELD_EX32(s->dsc_src.words[2], ZDMA_CH_SRC_DSCR_WORD2, SIZE);
    src_cmd = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, CMD);
    src_type = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, TYPE);
    src_intr = FIELD_EX32(s->dsc_src.words[3], ZDMA_CH_SRC_DSCR_WORD3, INTR);

    /* FIXED burst types and non-rw modes are only supported in
     * simple dma mode.
     */
    if (ptype != PT_REG) {
        if (rw_mode != RW_MODE_RW) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "zDMA: rw-mode=%d but not simple DMA mode.\n",
                          rw_mode);
        }
        if (burst_type != AXI_BURST_INCR) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "zDMA: burst_type=%d but not simple DMA mode.\n",
                          burst_type);
        }
        burst_type = AXI_BURST_INCR;
        rw_mode = RW_MODE_RW;
    }

    if (rw_mode == RW_MODE_WO) {
        /* In Simple DMA Write-Only, we need to push DST size bytes
         * regardless of what SRC size is set to.  */
        src_size = FIELD_EX32(s->dsc_dst.words[2], ZDMA_CH_DST_DSCR_WORD2,
                              SIZE);
        memcpy(s->buf, &s->regs[R_ZDMA_CH_WR_ONLY_WORD0], s->cfg.bus_width / 8);
    }

    while (src_size) {
        len = src_size > ARRAY_SIZE(s->buf) ? ARRAY_SIZE(s->buf) : src_size;
        if (burst_type == AXI_BURST_FIXED) {
            if (len > (s->cfg.bus_width / 8)) {
                len = s->cfg.bus_width / 8;
            }
        }

        if (rw_mode == RW_MODE_WO) {
            if (len > s->cfg.bus_width / 8) {
                len = s->cfg.bus_width / 8;
            }
        } else {
            address_space_rw(s->dma_as, src_addr, s->attr, s->buf, len,
                             false);
            if (burst_type == AXI_BURST_INCR) {
                src_addr += len;
            }
        }

        if (rw_mode != RW_MODE_RO) {
            zdma_write_dst(s, s->buf, len);
        }

        s->regs[R_ZDMA_CH_TOTAL_BYTE] += len;
        src_size -= len;
    }

    ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_DONE, true);

    if (src_intr) {
        zdma_src_done(s);
    }

    /* Load next descriptor.  */
    if (ptype == PT_REG || src_cmd == CMD_STOP) {
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_CTRL2, EN, 0);
        zdma_set_state(s, DISABLED);
        return;
    }

    if (src_cmd == CMD_HALT) {
        zdma_set_state(s, PAUSED);
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, DMA_PAUSE, 1);
        zdma_ch_imr_update_irq(s);
        return;
    }

    zdma_update_descr_addr(s, src_type, R_ZDMA_CH_SRC_CUR_DSCR_LSB);
}

static void zdma_run(XlnxZDMA *s)
{
    while (s->state == ENABLED && !s->error) {
        zdma_load_src_descriptor(s);

        if (s->error) {
            zdma_set_state(s, DISABLED);
        } else {
            zdma_process_descr(s);
        }
    }

    zdma_ch_imr_update_irq(s);
}

static void zdma_update_descr_addr_from_start(XlnxZDMA *s)
{
    uint64_t src_addr, dst_addr;

    src_addr = zdma_get_regaddr64(s, R_ZDMA_CH_SRC_START_LSB);
    zdma_put_regaddr64(s, R_ZDMA_CH_SRC_CUR_DSCR_LSB, src_addr);
    dst_addr = zdma_get_regaddr64(s, R_ZDMA_CH_DST_START_LSB);
    zdma_put_regaddr64(s, R_ZDMA_CH_DST_CUR_DSCR_LSB, dst_addr);
    zdma_load_dst_descriptor(s);
}

static void zdma_ch_ctrlx_postw(RegisterInfo *reg, uint64_t val64)
{
    XlnxZDMA *s = XLNX_ZDMA(reg->opaque);

    if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL2, EN)) {
        s->error = false;

        if (s->state == PAUSED &&
            ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) {
            if (ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT_ADDR) == 1) {
                zdma_update_descr_addr_from_start(s);
            } else {
                bool src_type = FIELD_EX32(s->dsc_src.words[3],
                                       ZDMA_CH_SRC_DSCR_WORD3, TYPE);
                zdma_update_descr_addr(s, src_type,
                                          R_ZDMA_CH_SRC_CUR_DSCR_LSB);
            }
            ARRAY_FIELD_DP32(s->regs, ZDMA_CH_CTRL0, CONT, false);
            zdma_set_state(s, ENABLED);
        } else if (s->state == DISABLED) {
            zdma_update_descr_addr_from_start(s);
            zdma_set_state(s, ENABLED);
        }
    } else {
        /* Leave Paused state?  */
        if (s->state == PAUSED &&
            ARRAY_FIELD_EX32(s->regs, ZDMA_CH_CTRL0, CONT)) {
            zdma_set_state(s, DISABLED);
        }
    }

    zdma_run(s);
}
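
/*
 * A minimal simple-mode (PT_REG) transfer, as seen from the guest:
 *   1. Program ZDMA_CH_SRC_DSCR_WORD0/1 with the source address and
 *      ZDMA_CH_SRC_DSCR_WORD2 with the byte count; likewise for the
 *      DST_DSCR_WORD registers.
 *   2. Leave ZDMA_CH_CTRL0.POINT_TYPE at 0 (register descriptors).
 *   3. Write 1 to ZDMA_CH_CTRL2.EN. This model performs the whole copy
 *      synchronously inside zdma_ch_ctrlx_postw()/zdma_run(), then sets
 *      DMA_DONE in ZDMA_CH_ISR and raises the IRQ if it is unmasked.
 */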

static RegisterAccessInfo zdma_regs_info[] = {
    {   .name = "ZDMA_ERR_CTRL",  .addr = A_ZDMA_ERR_CTRL,
        .rsvd = 0xfffffffe,
    },{ .name = "ZDMA_CH_ISR",  .addr = A_ZDMA_CH_ISR,
        .rsvd = 0xfffff000,
        .w1c = 0xfff,
        .post_write = zdma_ch_isr_postw,
    },{ .name = "ZDMA_CH_IMR",  .addr = A_ZDMA_CH_IMR,
        .reset = 0xfff,
        .rsvd = 0xfffff000,
        .ro = 0xfff,
    },{ .name = "ZDMA_CH_IEN",  .addr = A_ZDMA_CH_IEN,
        .rsvd = 0xfffff000,
        .pre_write = zdma_ch_ien_prew,
    },{ .name = "ZDMA_CH_IDS",  .addr = A_ZDMA_CH_IDS,
        .rsvd = 0xfffff000,
        .pre_write = zdma_ch_ids_prew,
    },{ .name = "ZDMA_CH_CTRL0",  .addr = A_ZDMA_CH_CTRL0,
        .reset = 0x80,
        .rsvd = 0xffffff01,
        .post_write = zdma_ch_ctrlx_postw,
    },{ .name = "ZDMA_CH_CTRL1",  .addr = A_ZDMA_CH_CTRL1,
        .reset = 0x3ff,
        .rsvd = 0xfffffc00,
    },{ .name = "ZDMA_CH_FCI",  .addr = A_ZDMA_CH_FCI,
        .rsvd = 0xffffffc0,
    },{ .name = "ZDMA_CH_STATUS",  .addr = A_ZDMA_CH_STATUS,
        .rsvd = 0xfffffffc,
        .ro = 0x3,
    },{ .name = "ZDMA_CH_DATA_ATTR",  .addr = A_ZDMA_CH_DATA_ATTR,
        .reset = 0x483d20f,
        .rsvd = 0xf0000000,
    },{ .name = "ZDMA_CH_DSCR_ATTR",  .addr = A_ZDMA_CH_DSCR_ATTR,
        .rsvd = 0xfffffe00,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD0",  .addr = A_ZDMA_CH_SRC_DSCR_WORD0,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD1",  .addr = A_ZDMA_CH_SRC_DSCR_WORD1,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD2",  .addr = A_ZDMA_CH_SRC_DSCR_WORD2,
        .rsvd = 0xc0000000,
    },{ .name = "ZDMA_CH_SRC_DSCR_WORD3",  .addr = A_ZDMA_CH_SRC_DSCR_WORD3,
        .rsvd = 0xffffffe0,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD0",  .addr = A_ZDMA_CH_DST_DSCR_WORD0,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD1",  .addr = A_ZDMA_CH_DST_DSCR_WORD1,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD2",  .addr = A_ZDMA_CH_DST_DSCR_WORD2,
        .rsvd = 0xc0000000,
    },{ .name = "ZDMA_CH_DST_DSCR_WORD3",  .addr = A_ZDMA_CH_DST_DSCR_WORD3,
        .rsvd = 0xfffffffa,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD0",  .addr = A_ZDMA_CH_WR_ONLY_WORD0,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD1",  .addr = A_ZDMA_CH_WR_ONLY_WORD1,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD2",  .addr = A_ZDMA_CH_WR_ONLY_WORD2,
    },{ .name = "ZDMA_CH_WR_ONLY_WORD3",  .addr = A_ZDMA_CH_WR_ONLY_WORD3,
    },{ .name = "ZDMA_CH_SRC_START_LSB",  .addr = A_ZDMA_CH_SRC_START_LSB,
    },{ .name = "ZDMA_CH_SRC_START_MSB",  .addr = A_ZDMA_CH_SRC_START_MSB,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_DST_START_LSB",  .addr = A_ZDMA_CH_DST_START_LSB,
    },{ .name = "ZDMA_CH_DST_START_MSB",  .addr = A_ZDMA_CH_DST_START_MSB,
        .rsvd = 0xfffe0000,
    },{ .name = "ZDMA_CH_SRC_CUR_PYLD_LSB",  .addr = A_ZDMA_CH_SRC_CUR_PYLD_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_SRC_CUR_PYLD_MSB",  .addr = A_ZDMA_CH_SRC_CUR_PYLD_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_DST_CUR_PYLD_LSB",  .addr = A_ZDMA_CH_DST_CUR_PYLD_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_DST_CUR_PYLD_MSB",  .addr = A_ZDMA_CH_DST_CUR_PYLD_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_SRC_CUR_DSCR_LSB",  .addr = A_ZDMA_CH_SRC_CUR_DSCR_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_SRC_CUR_DSCR_MSB",  .addr = A_ZDMA_CH_SRC_CUR_DSCR_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_DST_CUR_DSCR_LSB",  .addr = A_ZDMA_CH_DST_CUR_DSCR_LSB,
        .ro = 0xffffffff,
    },{ .name = "ZDMA_CH_DST_CUR_DSCR_MSB",  .addr = A_ZDMA_CH_DST_CUR_DSCR_MSB,
        .rsvd = 0xfffe0000,
        .ro = 0x1ffff,
    },{ .name = "ZDMA_CH_TOTAL_BYTE",  .addr = A_ZDMA_CH_TOTAL_BYTE,
        .w1c = 0xffffffff,
    },{ .name = "ZDMA_CH_RATE_CNTL",  .addr = A_ZDMA_CH_RATE_CNTL,
        .rsvd = 0xfffff000,
    },{ .name = "ZDMA_CH_IRQ_SRC_ACCT",  .addr = A_ZDMA_CH_IRQ_SRC_ACCT,
        .rsvd = 0xffffff00,
        .ro = 0xff,
        .cor = 0xff,
    },{ .name = "ZDMA_CH_IRQ_DST_ACCT",  .addr = A_ZDMA_CH_IRQ_DST_ACCT,
        .rsvd = 0xffffff00,
        .ro = 0xff,
        .cor = 0xff,
    },{ .name = "ZDMA_CH_DBG0",  .addr = A_ZDMA_CH_DBG0,
        .rsvd = 0xfffffe00,
        .ro = 0x1ff,
    },{ .name = "ZDMA_CH_DBG1",  .addr = A_ZDMA_CH_DBG1,
        .rsvd = 0xfffffe00,
        .ro = 0x1ff,
    },{ .name = "ZDMA_CH_CTRL2",  .addr = A_ZDMA_CH_CTRL2,
        .rsvd = 0xfffffffe,
        .post_write = zdma_ch_ctrlx_postw,
    }
};

static void zdma_reset(DeviceState *dev)
{
    XlnxZDMA *s = XLNX_ZDMA(dev);
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(s->regs_info); ++i) {
        register_reset(&s->regs_info[i]);
    }

    zdma_ch_imr_update_irq(s);
}

static uint64_t zdma_read(void *opaque, hwaddr addr, unsigned size)
{
    XlnxZDMA *s = XLNX_ZDMA(opaque);
    RegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        gchar *path = object_get_canonical_path(OBJECT(s));
        qemu_log("%s: Decode error: read from %" HWADDR_PRIx "\n",
                 path,
                 addr);
        g_free(path);
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
        zdma_ch_imr_update_irq(s);
        return 0;
    }
    return register_read(r, ~0, NULL, false);
}

static void zdma_write(void *opaque, hwaddr addr, uint64_t value,
                      unsigned size)
{
    XlnxZDMA *s = XLNX_ZDMA(opaque);
    RegisterInfo *r = &s->regs_info[addr / 4];

    if (!r->data) {
        gchar *path = object_get_canonical_path(OBJECT(s));
        qemu_log("%s: Decode error: write to %" HWADDR_PRIx "=%" PRIx64 "\n",
                 path,
                 addr, value);
        g_free(path);
        ARRAY_FIELD_DP32(s->regs, ZDMA_CH_ISR, INV_APB, true);
        zdma_ch_imr_update_irq(s);
        return;
    }
    register_write(r, value, ~0, NULL, false);
}

static const MemoryRegionOps zdma_ops = {
    .read = zdma_read,
    .write = zdma_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

static void zdma_realize(DeviceState *dev, Error **errp)
{
    XlnxZDMA *s = XLNX_ZDMA(dev);
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(zdma_regs_info); ++i) {
        RegisterInfo *r = &s->regs_info[zdma_regs_info[i].addr / 4];

        *r = (RegisterInfo) {
            .data = (uint8_t *)&s->regs[
                    zdma_regs_info[i].addr / 4],
            .data_size = sizeof(uint32_t),
            .access = &zdma_regs_info[i],
            .opaque = s,
        };
    }

    if (s->dma_mr) {
        s->dma_as = g_malloc0(sizeof(AddressSpace));
        address_space_init(s->dma_as, s->dma_mr, NULL);
    } else {
        s->dma_as = &address_space_memory;
    }
    s->attr = MEMTXATTRS_UNSPECIFIED;
}

static void zdma_init(Object *obj)
{
    XlnxZDMA *s = XLNX_ZDMA(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    memory_region_init_io(&s->iomem, obj, &zdma_ops, s,
                          TYPE_XLNX_ZDMA, ZDMA_R_MAX * 4);
    sysbus_init_mmio(sbd, &s->iomem);
    sysbus_init_irq(sbd, &s->irq_zdma_ch_imr);

    object_property_add_link(obj, "dma", TYPE_MEMORY_REGION,
                             (Object **)&s->dma_mr,
                             qdev_prop_allow_set_link_before_realize,
                             OBJ_PROP_LINK_STRONG,
                             &error_abort);
}
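
/*
 * Board wiring (a sketch, not taken from an actual board file): this is a
 * sysbus device with one MMIO region, one IRQ and an optional "dma" link
 * that overrides the address space used for descriptor and data accesses.
 * A board of this QEMU vintage could hook it up roughly like:
 *
 *   DeviceState *dev = qdev_create(NULL, TYPE_XLNX_ZDMA);
 *   object_property_set_link(OBJECT(dev), OBJECT(dma_mr), "dma",
 *                            &error_abort);
 *   qdev_init_nofail(dev);
 *   sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, base_addr);
 *   sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq);
 *
 * where dma_mr, base_addr and irq are placeholders supplied by the board.
 */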

static const VMStateDescription vmstate_zdma = {
    .name = TYPE_XLNX_ZDMA,
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, XlnxZDMA, ZDMA_R_MAX),
        VMSTATE_UINT32(state, XlnxZDMA),
        VMSTATE_UINT32_ARRAY(dsc_src.words, XlnxZDMA, 4),
        VMSTATE_UINT32_ARRAY(dsc_dst.words, XlnxZDMA, 4),
        VMSTATE_END_OF_LIST(),
    }
};

static Property zdma_props[] = {
    DEFINE_PROP_UINT32("bus-width", XlnxZDMA, cfg.bus_width, 64),
    DEFINE_PROP_END_OF_LIST(),
};

static void zdma_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->reset = zdma_reset;
    dc->realize = zdma_realize;
    dc->props = zdma_props;
    dc->vmsd = &vmstate_zdma;
}

static const TypeInfo zdma_info = {
    .name          = TYPE_XLNX_ZDMA,
    .parent        = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XlnxZDMA),
    .class_init    = zdma_class_init,
    .instance_init = zdma_init,
};

static void zdma_register_types(void)
{
    type_register_static(&zdma_info);
}

type_init(zdma_register_types)