/* qemu/hw/ssi/xlnx-axiqspi.c */
   1/*
   2 * QEMU model of the AXIQSPI Memory Map for AXI QSPI
   3 *
   4 * Copyright (c) 2020 Xilinx Inc.
   5 *
   6 * Partially autogenerated by xregqemu.py 2019-11-21.
   7 * Written by: Joe Komlodi<komlodi@xilinx.com>
   8 *
   9 * Permission is hereby granted, free of charge, to any person obtaining a copy
  10 * of this software and associated documentation files (the "Software"), to deal
  11 * in the Software without restriction, including without limitation the rights
  12 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  13 * copies of the Software, and to permit persons to whom the Software is
  14 * furnished to do so, subject to the following conditions:
  15 *
  16 * The above copyright notice and this permission notice shall be included in
  17 * all copies or substantial portions of the Software.
  18 *
  19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  24 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  25 * THE SOFTWARE.
  26 */
  27
  28#include "qemu/osdep.h"
  29#include "hw/sysbus.h"
  30#include "hw/register.h"
  31#include "qemu/bitops.h"
  32#include "qemu/log.h"
  33#include "qemu/fifo.h"
  34#include "qapi/error.h"
  35#include "qemu/error-report.h"
  36#include "hw/irq.h"
  37#include "migration/vmstate.h"
  38#include "hw/qdev-properties.h"
  39#include "hw/dma-ctrl.h"
  40
  41#include "sysemu/dma.h"
  42#include "hw/ssi/ssi.h"
  43#include "hw/fdt_generic_util.h"
  44
  45#define TYPE_XLNX_AXIQSPI "xlnx,axi-quad-spi-3.2"
  46
  47#define XLNX_AXIQSPI(obj) \
  48     OBJECT_CHECK(XlnxAXIQSPI, (obj), TYPE_XLNX_AXIQSPI)
  49
  50#ifndef XLNX_AXIQSPI_ERR_DEBUG
  51#define XLNX_AXIQSPI_ERR_DEBUG 0
  52#endif
  53
  54#define DB_PRINT(...) do { \
  55    if (XLNX_AXIQSPI_ERR_DEBUG) { \
  56        qemu_log(__VA_ARGS__); \
  57    } \
  58} while (0)
  59
  60REG32(DGIER, 0x1c)
  61    FIELD(DGIER, GIE, 31, 1)
  62REG32(IPISR, 0x20)
  63    FIELD(IPISR, COMMAND_ERROR, 13, 1)
  64    FIELD(IPISR, LOOPBACK_ERROR, 12, 1)
  65    FIELD(IPISR, MSB_ERROR, 11, 1)
  66    FIELD(IPISR, SLAVE_MODE_ERROR, 10, 1)
  67    FIELD(IPISR, CPOL_CPHA_ERROR, 9, 1)
  68    FIELD(IPISR, DRR_NOT_EMPTY, 8, 1)
  69    FIELD(IPISR, SLAVE_SELECT_MODE, 7, 1)
  70    FIELD(IPISR, TXFIFO_HALF_EMPTY, 6, 1)
  71    FIELD(IPISR, DRR_OVERRUN, 5, 1)
  72    FIELD(IPISR, DRR_FULL, 4, 1)
  73    FIELD(IPISR, DTR_UNDERRUN, 3, 1)
  74    FIELD(IPISR, DTR_EMPTY, 2, 1)
  75    FIELD(IPISR, SLAVE_MODF, 1, 1)
  76    FIELD(IPISR, MODF, 0, 1)
  77REG32(IPIER, 0x28)
  78    FIELD(IPIER, COMMAND_ERROR, 13, 1)
  79    FIELD(IPIER, LOOPBACK_ERROR, 12, 1)
  80    FIELD(IPIER, MSB_ERROR, 11, 1)
  81    FIELD(IPIER, SLAVE_MODE_ERROR, 10, 1)
  82    FIELD(IPIER, CPOL_CPHA_ERROR, 9, 1)
  83    FIELD(IPIER, DRR_NOT_EMPTY, 8, 1)
  84    FIELD(IPIER, SLAVE_SELECT_MODE, 7, 1)
  85    FIELD(IPIER, TX_FIFO_HALF_EMPTY, 6, 1)
  86    FIELD(IPIER, DRR_OVERRUN, 5, 1)
  87    FIELD(IPIER, DRR_FULL, 4, 1)
  88    FIELD(IPIER, DTR_UNDERRUN, 3, 1)
  89    FIELD(IPIER, DTR_EMPTY, 2, 1)
  90    FIELD(IPIER, SLAVE_MODF, 1, 1)
  91    FIELD(IPIER, MODF, 0, 1)
  92REG32(SRR, 0x40)
  93REG32(SPICR, 0x60)
  94    FIELD(SPICR, LSB_FIRST, 9, 1)
  95    FIELD(SPICR, MASTER_TRANSACTION_INHIBIT, 8, 1)
  96    FIELD(SPICR, MANUAL_SLAVE_SELECT_ASSERTION_ENABLE, 7, 1)
  97    FIELD(SPICR, RX_FIFO_RESET, 6, 1)
  98    FIELD(SPICR, TX_FIFO_RESET, 5, 1)
  99    FIELD(SPICR, CPHA, 4, 1)
 100    FIELD(SPICR, CPOL, 3, 1)
 101    FIELD(SPICR, MASTER, 2, 1)
 102    FIELD(SPICR, SPE, 1, 1)
 103    FIELD(SPICR, LOOP, 0, 1)
 104REG32(SPISR, 0x64)
 105    FIELD(SPISR, COMMAND_ERROR, 10, 1)
 106    FIELD(SPISR, LOOPBACK_ERROR, 9, 1)
 107    FIELD(SPISR, MSB_ERROR, 8, 1)
 108    FIELD(SPISR, SLAVE_MODE_ERROR, 7, 1)
 109    FIELD(SPISR, CPOL_CPHA_ERROR, 6, 1)
 110    FIELD(SPISR, SLAVE_MODE_SELECT, 5, 1)
 111    FIELD(SPISR, MODF, 4, 1)
 112    FIELD(SPISR, TX_FULL, 3, 1)
 113    FIELD(SPISR, TX_EMPTY, 2, 1)
 114    FIELD(SPISR, RX_FULL, 1, 1)
 115    FIELD(SPISR, RX_EMPTY, 0, 1)
 116REG32(SPI_DTR, 0x68)
 117REG32(SPI_DRR, 0x6c)
 118REG32(SPI_SSR, 0x70)
 119REG32(SPI_TXFIFO_OR, 0x74)
 120REG32(SPI_RXFIFO_OR, 0x78)
 121
 122#define XLNX_AXIQSPI_R_MAX (R_SPI_RXFIFO_OR + 1)
 123
 124/* These registers are only used if AXIQSPI is in XIP mode */
 125REG32(XIP_CONFIG_REG, 0x60)
 126    FIELD(XIP_CONFIG_REG, CPOL, 1, 1)
 127    FIELD(XIP_CONFIG_REG, CPHA, 0, 1)
 128REG32(XIP_STATUS_REG, 0x64)
 129    FIELD(XIP_STATUS_REG, AXI_TRANSACTION_ERROR, 4, 1)
 130    FIELD(XIP_STATUS_REG, CPOL_CPHA_ERROR, 3, 1)
 131    FIELD(XIP_STATUS_REG, MASTER_MODF, 2, 1)
 132    FIELD(XIP_STATUS_REG, RX_FULL, 1, 1)
 133    FIELD(XIP_STATUS_REG, RX_EMPTY, 0, 1)
 134
 135#define XLNX_AXIQSPI_XIP_R_MAX (R_XIP_STATUS_REG + 1)
 136
 137#define AXIQSPI_NUM_CS_MAX 32
 138#define AXIQSPI_NUM_CS_MIN 1
 139#define AXIQSPI_SRR_RESET_VAL 0x0000000A
 140
 141/*
 142 * Extended addressing bits for relevant flash chips.
 143 * These are used when we need to check if we're changing between
 144 * 3- and 4-byte addressing.
 145 */
 146#define SP_EXTADDR_BIT 0x80
 147#define MI_EXTADDR_BIT 0x01
 148#define MA_EXTADDR_BIT 0x20
 149
 150#define MAKE_32BIT_MASK(len) (uint32_t)MAKE_64BIT_MASK(0, len)
 151
 152/* SPI mode values match those used in the DTS */
 153typedef enum {
 154    AXIQSPI_MODE_STD = 0,
 155    AXIQSPI_MODE_DUAL = 1,
 156    AXIQSPI_MODE_QUAD = 2,
 157    AXIQSPI_MODE_INVALID = 3
 158} AXIQSPIMode;
 159
 160/* SPI memory values match those used in the DTS */
 161typedef enum {
 162    SPI_MEM_MIXED = 0,
 163    SPI_MEM_WINBOND = 1,
 164    SPI_MEM_MICRON = 2,
 165    SPI_MEM_SPANSION = 3,
 166    SPI_MEM_MACRONIX = 4,
 167    SPI_MEM_INVALID = 5
 168} SPIMemType;
 169
 170/* Commands are taken from the AXIQSPI support command list */
 171typedef enum {
 172    NOP = 0x00,
 173    WRITE_STATUS_REG = 0x01,
 174    PAGE_PROGRAM = 0x02,
 175    READ_DATA = 0x03,
 176    WRITE_DISABLE = 0x04,
 177    READ_STATUS_REG = 0x05,
 178    WRITE_ENABLE = 0x06,
 179    READ_STATUS_REG_2 = 0x07,
 180    FAST_READ = 0x0B,
 181    FAST_READ_4B = 0x0C,
 182    PAGE_PROGRAM_4B = 0x12,
 183    READ_4B = 0x13,
 184    AUTOBOOT_REG = 0x15,
 185    BANK_REGISTER_READ = 0x16,
 186    BANK_REGISTER_WRITE = 0x17,
 187    ERASE_FAST_BOOT_REGISTER = 0x18,
 188    SUBSECTOR_ERASE = 0x20,
 189    ERASE_4B = 0x21,
 190    READ_PASSWORD_4B = 0x27,
 191    WRITE_PASSWORD = 0x28,
 192    UNLOCK_PASSWORD = 0x29,
 193    READ_SECURITY_REGISTER = 0x2B,
 194    WRITE_LOCK_REG = 0x2C,
 195    READ_LOCK_REG = 0x2D,
 196    WRITE_SECURITY_REGISTER = 0x2F,
 197    CLEAR_STATUS_REG_1 = 0x30,
 198    WRITE_STATUS_REG_2 = 0x31,
 199    QUAD_PAGE_PROGRAM = 0x32,
 200    QUAD_PAGE_PROGRAM_4B = 0x34,
 201    READ_CONF_REG = 0x35,
 202    MI_ENTER_QUAD_IO_MODE = 0x35,
 203    QUAD_INPUT_FAST_IO = 0x38,
 204    FAST_READ_DUAL = 0x3B,
 205    FAST_READ_DUAL_4B = 0x3C,
 206    QUAD_PAGE_PROGRAM_IO_4B = 0x3E,
 207    PROGRAM_OTP = 0x42,
 208    READ_UNIQUE_ID = 0x4B,
 209    SP_OTP_READ = 0x4B,
 210    MI_OTP_READ = 0x4B,
 211    CLEAR_STATUS_REG = 0x50,
 212    ERASE_32K = 0x52,
 213    READ_FLASH_DISCOVERABLE_PARAMS = 0x5A,
 214    SUBSECTOR_ERASE_4B = 0x5C,
 215    BULK_ERASE = 0x60,
 216    WRITE_VOLATILE_ENH_CONFIG_REG = 0x61,
 217    READ_VOLATILE_ENH_CONFIG_REG = 0x65,
 218    RESET_ENABLE = 0x66,
 219    WRITE_PROTECT_SEL = 0x68,
 220    FAST_READ_QUAD = 0x6B,
 221    READ_QUAD_4B = 0x6C,
 222    READ_FLAG_STATUS_REG = 0x70,
 223    ERASE_SUSPEND = 0x75,
 224    ERASE_RESUME = 0x7A,
 225    GANG_BLOCK_LOCK = 0x7E,
 226    WRITE_VOLATILE_CONFIG_REG = 0x81,
 227    READ_VOLATILE_CONFIG_REG = 0x85,
 228    PROGRAM_SUSPEND = 0x85,
 229    PROGRAM_RESUME = 0x8A,
 230    READ_DEVICE_ID = 0x90,
 231    GANG_BLOCK_UNLOCK = 0x98,
 232    RESET_MEMORY = 0x99,
 233    READ_JEDEC_ID = 0x9F,
 234    DUAL_FAST_PROGRAM = 0xA2,
 235    HIGH_PERFORMANCE_MODE = 0xA3,
 236    WRITE_GLOBAL_FREEZE_BIT = 0xA6,
 237    PPB_LOCK_BIT_READ = 0xA7,
 238    READ_GLOBAL_FREEZE_BIT = 0xA7,
 239    PPB_LOCK_BIT_WRITE = 0xA8,
 240    EXIT_HIGH_PERFORMANCE_MODE = 0xAB,
 241    READ_ELECTRONIC_SIGNATURE = 0xAB,
 242    EXIT_POWER_DOWN = 0xAB,
 243    QUAD_IO_READ_ID = 0xAF,
 244    PROGRAM_ERASE_SUSPEND = 0xB0,
 245    WRITE_NONVOLATILE_CONFIG_REG = 0xB1,
 246    READ_NONVOLATILE_CONFIG_REG = 0xB5,
 247    ENTER_4B_ADDR_MODE = 0xB7,
 248    POWER_DOWN = 0xB9,
 249    BANK_REGISTER_ACCESS = 0xB9,
 250    FAST_READ_DUAL_IO = 0xBB,
 251    FAST_READ_DUAL_IO_4B = 0xBC,
 252    SET_BURST_READ_LENGTH = 0xC0,
 253    EXIT_SECURE_OTP = 0xC1,
 254    DIE_ERASE = 0xC4,
 255    WRITE_EXTENDED_ADDR_REG = 0xC5,
 256    READ_EXTENDED_ADDR_REG = 0xC8,
 257    CHIP_ERASE = 0xC7,
 258    DUAL_EXTENDED_FAST_PROGRAM = 0xD2,
 259    BLOCK_ERASE = 0xD8,
 260    BLOCK_ERASE_4B = 0xDC,
 261    DYB_READ = 0xE0,
 262    WRITE_DATA_PROTECTION_REG_BITS = 0xE0,
 263    READ_VOLATILE_LOCK_4B = 0xE0,
 264    DYB_WRITE = 0xE1,
 265    WRITE_VOLATILE_LOCK_4B = 0xE1,
 266    PPB_READ = 0xE2,
 267    READ_NONVOLATILE_LOCK = 0xE2,
 268    WRITE_DATA_PROTECTION_REG = 0xE2,
 269    PPB_PROGRAM = 0xE3,
 270    WRITE_NONVOLATILE_LOCK = 0xE3,
 271    OCTAL_WORD_READ_QUAD_IO = 0xE3,
 272    PPB_ERASE = 0xE4,
 273    ERASE_NONVOLATILE_LOCK = 0xE4,
 274    WRITE_VOLATILE_LOCK_REG = 0xE5,
 275    PASSWORD_READ = 0xE7,
 276    PASSWORD_PROGRAM = 0xE8,
 277    PASSWORD_UNLOCK = 0xE9,
 278    EXIT_4B_ADDR_MODE = 0xE9,
 279    FAST_READ_QUAD_IO = 0xEB,
 280    FAST_READ_QUAD_IO_4B = 0xEC,
 281    RESET = 0xF0,
 282    EXIT_QUAD_IO_MODE = 0xF5,
 283    MODE_BIT_RESET = 0xFF
 284} FlashCmd;
 285
 286typedef enum {
 287    AXIQSPI_STATE_STOPPED = 0,
 288    AXIQSPI_STATE_TX_CMD = 1,
 289    AXIQSPI_STATE_TX_ADDR = 2,
 290    AXIQSPI_STATE_TX_DUMMIES = 3,
 291    AXIQSPI_STATE_TX = 4
 292} AXIQSPIState;
 293
/* Static configuration for the core, fixed before the device is realized. */
typedef struct XlnxAXIQSPIConf {
    bool xip_mode;              /* true when the core runs in XIP mode */
    uint16_t fifo_depth;        /* 0 means the core has no TX/RX FIFOs */
    uint8_t transaction_width;  /* width of one transfer in bits; 32 = full */
    uint32_t tx_width_mask;     /* mask of valid SPI_DTR bits */
    uint8_t spi_mem;            /* SPIMemType of the attached flash */
    uint8_t mode;               /* AXIQSPIMode (std/dual/quad) */
    uint8_t num_cs;             /* number of chip-select lines */
    uint32_t num_cs_mask;       /* presumably a mask covering num_cs bits --
                                   not used in this chunk; verify */
    uint8_t xip_addr_bits;      /* XIP address width; 32 selects 4-byte mode */
    uint64_t xip_base;          /* NOTE(review): presumably XIP region base --
                                   not used in this chunk; verify */
} XlnxAXIQSPIConf;
 306
 307typedef enum {
 308    LINK_STATE_SINGLE = 1,
 309    LINK_STATE_DUAL = 2,
 310    LINK_STATE_QUAD = 4,
 311} AXIQSPILinkState;
 312
/* Run-time state of the AXI QSPI device model. */
typedef struct XlnxAXIQSPI {
    SysBusDevice parent_obj;
    MemoryRegion iomem;         /* register MMIO region */
    XlnxAXIQSPIConf conf;       /* static configuration (see above) */
    Fifo tx_fifo;               /* used only when conf.fifo_depth != 0 */
    Fifo rx_fifo;               /* used only when conf.fifo_depth != 0 */
    SSIBus *spi_bus;
    qemu_irq *cs_lines;         /* one chip-select line per conf.num_cs */
    qemu_irq irq;

    AXIQSPIState state;             /* current phase of the transfer FSM */
    AXIQSPILinkState link_state;    /* SPI link width (single/dual/quad) */
    uint8_t addr_bytes_txed;        /* address bytes sent so far */
    uint8_t dummy_bytes_txed;       /* dummy bytes sent so far */
    size_t bytes_txed;              /* data bytes sent for the current cmd */
    uint8_t cmd;                    /* current FlashCmd opcode */
    uint8_t addr_bytes;             /* address bytes the current cmd needs */
    uint8_t num_dummies;            /* dummy bytes the current cmd needs */
    bool is_addr_change_cmd;        /* cmd can switch 3-/4-byte addressing */
    bool is_4b_addressing;          /* flash currently in 4-byte addressing */
    bool is_xip_wb_init;            /* NOTE(review): presumably a Winbond XIP
                                       init flag -- not used in this chunk */
    uint32_t prev_ss;               /* previous SPI_SSR value -- assumption;
                                       not used in this chunk */

    MemoryRegion xip_mr;        /* XIP memory region */

    uint32_t regs[XLNX_AXIQSPI_R_MAX];
    RegisterInfo reg_info[XLNX_AXIQSPI_R_MAX];
} XlnxAXIQSPI;
 341
 342static inline uint8_t axiqspi_get_addressing(const XlnxAXIQSPI *s)
 343{
 344    if (s->conf.xip_mode) {
 345        return s->conf.xip_addr_bits == 32 ? 4 : 3;
 346    } else {
 347        return s->is_4b_addressing ? 4 : 3;
 348    }
 349}
 350
/*
 * Command parsing for the "mixed" flash configuration: set the address
 * and dummy byte counts for the commands supported in this mode.
 *
 * Returns false if the command is not recognized.
 */
static bool mixed_parse_cmd(XlnxAXIQSPI *s)
{
    switch (s->cmd) {
    /* Commands with no address and no dummy bytes */
    case WRITE_STATUS_REG:
    case READ_STATUS_REG:
    case WRITE_DISABLE:
    case WRITE_ENABLE:
    case ERASE_SUSPEND:
    case ERASE_RESUME:
    case READ_UNIQUE_ID:
    case READ_JEDEC_ID:
    case CHIP_ERASE:
        s->addr_bytes = 0;
        s->num_dummies = 0;

        return true;
    /* Commands with a 3-byte address and no dummy bytes */
    case PAGE_PROGRAM:
    case READ_DATA:
    case QUAD_PAGE_PROGRAM:
    case BLOCK_ERASE:
        s->addr_bytes = 3;
        s->num_dummies = 0;

        return true;
    default:
        return false;
    }
}
 379
 380/*
 381 * Not the same as mixed mode command parsing.
 382 * This condenses commands shared between flash chips to avoid
 383 * repeating ourselves.
 384 *
 385 * Returns false if the command wasn't a shared command.
 386 */
 387static bool shared_parse_cmd(XlnxAXIQSPI *s)
 388{
 389    switch (s->cmd) {
 390    case READ_JEDEC_ID:
 391    case WRITE_ENABLE:
 392    case WRITE_DISABLE:
 393    case CHIP_ERASE:
 394    case BULK_ERASE:
 395    case READ_STATUS_REG:
 396    case READ_CONF_REG:
 397    case WRITE_STATUS_REG:
 398        s->addr_bytes = 0;
 399        s->num_dummies = 0;
 400
 401        return true;
 402    case QUAD_PAGE_PROGRAM:
 403        s->addr_bytes = axiqspi_get_addressing(s);
 404        s->num_dummies = 0;
 405
 406        return true;
 407    case FAST_READ:
 408        s->addr_bytes = axiqspi_get_addressing(s);
 409        s->num_dummies = 1;
 410
 411        return true;
 412    case FAST_READ_DUAL:
 413    case FAST_READ_DUAL_IO:
 414        s->addr_bytes = axiqspi_get_addressing(s);
 415        s->num_dummies = 2;
 416
 417        return true;
 418    case FAST_READ_QUAD:
 419    case FAST_READ_QUAD_IO:
 420        s->addr_bytes = axiqspi_get_addressing(s);
 421        s->num_dummies = 4;
 422
 423        return true;
 424    case PAGE_PROGRAM:
 425    case READ_DATA:
 426    case BLOCK_ERASE:
 427        s->addr_bytes = axiqspi_get_addressing(s);
 428        s->num_dummies = 0;
 429
 430        return true;
 431    case READ_UNIQUE_ID:
 432    case ERASE_RESUME:
 433    case ERASE_SUSPEND:
 434        qemu_log_mask(LOG_UNIMP, "Command %x not implemented\n", s->cmd);
 435
 436        return false;
 437    default:
 438        return false;
 439    }
 440}
 441
/*
 * Winbond-specific command parsing (commands not covered by
 * shared_parse_cmd). Returns false if the command is not recognized.
 */
static bool winbond_parse_cmd(XlnxAXIQSPI *s)
{
    switch (s->cmd) {
    /* No address, no dummy bytes */
    case WRITE_STATUS_REG:
    case EXIT_HIGH_PERFORMANCE_MODE:
        s->addr_bytes = 0;
        s->num_dummies = 0;

        return true;
    /* 3-byte address, no dummy bytes */
    case ERASE_32K:
    case SUBSECTOR_ERASE:
    case READ_DEVICE_ID:
    case HIGH_PERFORMANCE_MODE:
        s->addr_bytes = 3;
        s->num_dummies = 0;

        return true;
    case READ_STATUS_REG_2:
    case WRITE_STATUS_REG_2:
    case POWER_DOWN:
        qemu_log_mask(LOG_UNIMP, "Command %x not implemented\n", s->cmd);

        return false;
    default:
        return false;
    }
}
 469
/*
 * Spansion-specific command parsing (commands not covered by
 * shared_parse_cmd). Returns false if the command is not recognized.
 */
static bool spansion_parse_cmd(XlnxAXIQSPI *s)
{
    switch (s->cmd) {
    /* No address, no dummy bytes */
    case WRITE_STATUS_REG:
    case READ_CONF_REG:
    case BANK_REGISTER_ACCESS:
    case RESET:
    case MODE_BIT_RESET:
        s->addr_bytes = 0;
        s->num_dummies = 0;

        return true;
    /* Bank register write may toggle 3-/4-byte addressing */
    case BANK_REGISTER_WRITE:
        s->is_addr_change_cmd = true;
        s->addr_bytes = 0;
        s->num_dummies = 0;

        return true;
    /* 3-byte address, no dummy bytes */
    case READ_DEVICE_ID:
        s->addr_bytes = 3;
        s->num_dummies = 0;

        return true;
    /* 4-byte address plus one dummy byte */
    case FAST_READ_4B:
        s->addr_bytes = 4;
        s->num_dummies = 1;

        return true;
    /* 4-byte address plus two dummy bytes */
    case FAST_READ_DUAL_4B:
    case FAST_READ_DUAL_IO_4B:
        s->addr_bytes = 4;
        s->num_dummies = 2;

        return true;
    /* 4-byte address plus four dummy bytes */
    case READ_QUAD_4B:
        s->addr_bytes = 4;
        s->num_dummies = 4;

        return true;
    /* 4-byte address, no dummy bytes */
    case PAGE_PROGRAM_4B:
    case READ_4B:
    case BLOCK_ERASE_4B:
    case QUAD_PAGE_PROGRAM_4B:
        s->addr_bytes = 4;
        s->num_dummies = 0;

        return true;
    /* No address, three dummy bytes */
    case READ_ELECTRONIC_SIGNATURE:
        s->addr_bytes = 0;
        s->num_dummies = 3;

        return true;
    case CLEAR_STATUS_REG:
    case BANK_REGISTER_READ:
    case READ_SECURITY_REGISTER:
    case WRITE_SECURITY_REGISTER:
    case PPB_LOCK_BIT_READ:
    case PPB_LOCK_BIT_WRITE:
    case PPB_ERASE:
    case PASSWORD_READ:
    case PASSWORD_PROGRAM:
    case PASSWORD_UNLOCK:
    case PROGRAM_SUSPEND:
    case PROGRAM_RESUME:
    case DYB_READ:
    case DYB_WRITE:
    case SP_OTP_READ:
    case PROGRAM_OTP:
    case PPB_READ:
    case PPB_PROGRAM:
    case READ_FLASH_DISCOVERABLE_PARAMS:
        qemu_log_mask(LOG_UNIMP, "Command %x not implemented\n", s->cmd);

        return false;
    default:
        return false;
    }
}
 548
/*
 * Micron-specific command parsing (commands not covered by
 * shared_parse_cmd). Returns false if the command is not recognized.
 */
static bool micron_parse_cmd(XlnxAXIQSPI *s)
{
    switch (s->cmd) {
    /* No address, no dummy bytes */
    case WRITE_STATUS_REG:
    case READ_NONVOLATILE_CONFIG_REG:
    case READ_VOLATILE_CONFIG_REG:
    case WRITE_VOLATILE_CONFIG_REG:
    case READ_VOLATILE_ENH_CONFIG_REG:
    case WRITE_VOLATILE_ENH_CONFIG_REG:
    case READ_FLAG_STATUS_REG:
    case MI_ENTER_QUAD_IO_MODE:
    case EXIT_QUAD_IO_MODE:
    case ERASE_32K:
    case RESET_MEMORY:
        s->num_dummies = 0;
        s->addr_bytes = 0;

        return true;
    /* NV config write may toggle 3-/4-byte addressing */
    case WRITE_NONVOLATILE_CONFIG_REG:
        s->is_addr_change_cmd = true;
        s->num_dummies = 0;
        s->addr_bytes = 0;

        return true;
    /* Address (mode dependent), no dummy bytes */
    case DUAL_FAST_PROGRAM:
        s->num_dummies = 0;
        s->addr_bytes = axiqspi_get_addressing(s);

        return true;
    /* Address (mode dependent), no dummy bytes */
    case DIE_ERASE:
    case SUBSECTOR_ERASE:
        s->num_dummies = 0;
        s->addr_bytes = axiqspi_get_addressing(s);

        return true;
    /* 4-byte address plus one dummy byte */
    case FAST_READ_4B:
        s->num_dummies = 1;
        s->addr_bytes = 4;

        return true;
    /* 4-byte address plus two dummy bytes */
    case FAST_READ_DUAL_4B:
    case FAST_READ_DUAL_IO_4B:
        s->num_dummies = 2;
        s->addr_bytes = 4;

        return true;
    /* 4-byte address plus four dummy bytes */
    case READ_QUAD_4B:
        s->num_dummies = 4;
        s->addr_bytes = 4;

        return true;
    /* 4-byte address, no dummy bytes */
    case PAGE_PROGRAM_4B:
    case QUAD_PAGE_PROGRAM_4B:
    case QUAD_PAGE_PROGRAM_IO_4B:
    case BLOCK_ERASE_4B:
    case ERASE_4B:
    case SUBSECTOR_ERASE_4B:
    case READ_4B:
        s->num_dummies = 0;
        s->addr_bytes = 4;

        return true;
    case QUAD_IO_READ_ID:
    case ERASE_NONVOLATILE_LOCK:
    case PASSWORD_PROGRAM:
    case WRITE_VOLATILE_LOCK_REG:
    case CLEAR_STATUS_REG:
    case WRITE_EXTENDED_ADDR_REG:
    case READ_EXTENDED_ADDR_REG:
    case POWER_DOWN:
    case EXIT_POWER_DOWN:
    case READ_GLOBAL_FREEZE_BIT:
    case WRITE_GLOBAL_FREEZE_BIT:
    case WRITE_PASSWORD:
    case UNLOCK_PASSWORD:
    case READ_LOCK_REG:
    case WRITE_LOCK_REG:
    case RESET_ENABLE:
    case ENTER_4B_ADDR_MODE:
    case EXIT_4B_ADDR_MODE:
    case READ_FLASH_DISCOVERABLE_PARAMS:
    case MI_OTP_READ:
    case DUAL_EXTENDED_FAST_PROGRAM:
    case QUAD_INPUT_FAST_IO:
    case PROGRAM_OTP:
    case READ_NONVOLATILE_LOCK:
    case WRITE_NONVOLATILE_LOCK:
    case READ_VOLATILE_LOCK_4B:
    case WRITE_VOLATILE_LOCK_4B:
    case READ_PASSWORD_4B:
        qemu_log_mask(LOG_UNIMP, "Command %x not implemented\n", s->cmd);

        return false;
    default:
        return false;
    }
}
 646
/*
 * Macronix-specific command parsing (commands not covered by
 * shared_parse_cmd). Returns false if the command is not recognized.
 */
static bool macronix_parse_cmd(XlnxAXIQSPI *s)
{
    switch (s->cmd) {
    /* No address, no dummy bytes */
    case RESET_MEMORY:
    case READ_VOLATILE_CONFIG_REG:
    case EXIT_QUAD_IO_MODE:
    case BANK_REGISTER_READ:
    case BANK_REGISTER_WRITE:
        s->num_dummies = 0;
        s->addr_bytes = 0;

        return true;
    /* Status register write may toggle 3-/4-byte addressing */
    case WRITE_STATUS_REG:
        s->is_addr_change_cmd = true;
        s->num_dummies = 0;
        s->addr_bytes = 0;

        return true;
    /* 4-byte address, no dummy bytes */
    case READ_4B:
    case FAST_READ_DUAL_IO_4B:
    case READ_QUAD_4B:
    case PAGE_PROGRAM_4B:
    case QUAD_PAGE_PROGRAM_IO_4B:
    case ERASE_4B:
        s->num_dummies = 0;
        s->addr_bytes = 4;

        return true;
    /* Address (mode dependent), no dummy bytes */
    case PAGE_PROGRAM:
    case READ_DEVICE_ID:
    case READ_DATA:
    case QUAD_PAGE_PROGRAM:
    case ERASE_32K:
    case SUBSECTOR_ERASE_4B:
    case BLOCK_ERASE:
    case SUBSECTOR_ERASE:
    case BLOCK_ERASE_4B:
        s->num_dummies = 0;
        s->addr_bytes = axiqspi_get_addressing(s);

        return true;
    /* 4-byte address plus one dummy byte */
    case FAST_READ_4B:
        s->num_dummies = 1;
        s->addr_bytes = 4;

        return true;
    /* 4-byte address plus two dummy bytes */
    case FAST_READ_DUAL_4B:
        s->num_dummies = 2;
        s->addr_bytes = 4;

        return true;
    case QUAD_IO_READ_ID:
    case WRITE_EXTENDED_ADDR_REG:
    case READ_EXTENDED_ADDR_REG:
    case RESET_ENABLE:
    case PROGRAM_OTP:
    case PASSWORD_PROGRAM:
    case READ_LOCK_REG:
    case WRITE_LOCK_REG:
    case READ_STATUS_REG_2:
    case POWER_DOWN:
    case CLEAR_STATUS_REG_1:
    case AUTOBOOT_REG:
    case WRITE_PASSWORD:
    case UNLOCK_PASSWORD:
    case SET_BURST_READ_LENGTH:
    case ERASE_FAST_BOOT_REGISTER:
    case PROGRAM_ERASE_SUSPEND:
    case WRITE_PROTECT_SEL:
    case EXIT_SECURE_OTP:
    case GANG_BLOCK_LOCK:
    case GANG_BLOCK_UNLOCK:
    case WRITE_DATA_PROTECTION_REG_BITS:
    case WRITE_DATA_PROTECTION_REG:
    case PPB_ERASE:
    case READ_SECURITY_REGISTER:
    case WRITE_SECURITY_REGISTER:
    case ENTER_4B_ADDR_MODE:
    case EXIT_4B_ADDR_MODE:
    case READ_FLASH_DISCOVERABLE_PARAMS:
    case READ_PASSWORD_4B:
    case OCTAL_WORD_READ_QUAD_IO:
    case QUAD_INPUT_FAST_IO:
        qemu_log_mask(LOG_UNIMP, "Command %x not implemented\n", s->cmd);

        return false;
    default:
        return false;
    }
}
 737
/* Retrieves the dummy byte link state from a command for all flash chips */
static uint8_t shared_get_link_state(uint8_t cmd)
{
    switch (cmd) {
    /* Quad commands clock dummy bytes over four lines */
    case QUAD_PAGE_PROGRAM:
    case FAST_READ_QUAD:
    case FAST_READ_QUAD_IO:
    case READ_QUAD_4B:
    case QUAD_PAGE_PROGRAM_4B:
    case QUAD_IO_READ_ID:
    case QUAD_INPUT_FAST_IO:
    case QUAD_PAGE_PROGRAM_IO_4B:
    case OCTAL_WORD_READ_QUAD_IO:
        return LINK_STATE_QUAD;
    /* Dual commands clock dummy bytes over two lines */
    case FAST_READ_DUAL:
    case FAST_READ_DUAL_IO:
    case FAST_READ_DUAL_IO_4B:
    case FAST_READ_DUAL_4B:
    case DUAL_FAST_PROGRAM:
    case DUAL_EXTENDED_FAST_PROGRAM:
        return LINK_STATE_DUAL;
    default:
        return LINK_STATE_SINGLE;
    }
}
 763
/*
 * Decode @cmd and latch its transfer parameters (address byte count,
 * dummy byte count, link state) into @s.
 *
 * Returns true if the command is recognized for the configured flash.
 */
static bool axiqspi_parse_cmd(XlnxAXIQSPI *s, uint8_t cmd)
{
    bool found;

    /* Standard, non-XIP mode doesn't care about commands */
    if (s->conf.mode == AXIQSPI_MODE_STD && !s->conf.xip_mode) {
        s->num_dummies = 0;
        s->link_state = LINK_STATE_SINGLE;
        s->addr_bytes = 0;
        return true;
    }

    s->is_addr_change_cmd = false;
    s->cmd = cmd;

    /* Shared command handling */
    if (shared_parse_cmd(s)) {
        s->link_state = shared_get_link_state(cmd);
        return true;
    }

    /* Individual flash handling */
    switch (s->conf.spi_mem) {
    case SPI_MEM_WINBOND:
        found = winbond_parse_cmd(s);
        break;
    case SPI_MEM_SPANSION:
        found = spansion_parse_cmd(s);
        break;
    case SPI_MEM_MICRON:
        found = micron_parse_cmd(s);
        break;
    case SPI_MEM_MACRONIX:
        found = macronix_parse_cmd(s);
        break;
    case SPI_MEM_MIXED:
        found = mixed_parse_cmd(s);
        break;
    default:
        /* NOTE(review): assumes spi_mem was validated at realize time */
        g_assert_not_reached();
    }

    if (found) {
        s->link_state = shared_get_link_state(cmd);
    }

    return found;
}
 812
 813static inline bool axiqspi_tx_fifo_half_empty(XlnxAXIQSPI *s)
 814{
 815    if (!s->conf.fifo_depth) {
 816        return false;
 817    }
 818
 819    return (fifo_num_used(&s->tx_fifo) == s->conf.fifo_depth / 2);
 820}
 821
 822static bool axiqspi_transaction_width_ok(const XlnxAXIQSPI *s, uint32_t val)
 823{
 824    if (s->conf.transaction_width == 32) {
 825        return true;
 826    }
 827
 828    /*
 829     * If any of the bits fall within this mask, those bits
 830     * fall within the reserved bits of the register
 831     */
 832    return !(val & ~s->conf.tx_width_mask);
 833}
 834
 835static uint32_t axiqspi_rx_pop(XlnxAXIQSPI *s)
 836{
 837    uint32_t data;
 838
 839    if (ARRAY_FIELD_EX32(s->regs, SPISR, RX_EMPTY)) {
 840        qemu_log_mask(LOG_GUEST_ERROR, "axiqspi: Attempted to read SPI_DRR "
 841                      "when it was empty\n");
 842        return 0;
 843    }
 844
 845    if (s->conf.fifo_depth) {
 846        if (ARRAY_FIELD_EX32(s->regs, SPISR, RX_FULL)) {
 847            ARRAY_FIELD_DP32(s->regs, SPISR, RX_FULL, 0);
 848        }
 849
 850        data = fifo_pop32(&s->rx_fifo);
 851        if (fifo_is_empty(&s->rx_fifo)) {
 852            ARRAY_FIELD_DP32(s->regs, SPISR, RX_EMPTY, 1);
 853        }
 854    } else {
 855        data = s->regs[R_SPI_DRR];
 856
 857        ARRAY_FIELD_DP32(s->regs, SPISR, RX_EMPTY, 1);
 858        ARRAY_FIELD_DP32(s->regs, SPISR, RX_FULL, 0);
 859    }
 860
 861    return data;
 862}
 863
 864static void axiqspi_update_irq(XlnxAXIQSPI *s)
 865{
 866    bool gie = ARRAY_FIELD_EX32(s->regs, DGIER, GIE);
 867    bool irq = (s->regs[R_IPISR] & s->regs[R_IPIER]);
 868
 869    qemu_set_irq(s->irq, irq && gie);
 870}
 871
 872static uint64_t axiqspi_spi_drr_post_read(RegisterInfo *reg, uint64_t val64)
 873{
 874    XlnxAXIQSPI *s = XLNX_AXIQSPI(reg->opaque);
 875    uint32_t val = axiqspi_rx_pop(s);
 876
 877    return val;
 878}
 879
 880static uint64_t axiqspi_ipisr_pre_write(RegisterInfo *reg, uint64_t val64)
 881{
 882    XlnxAXIQSPI *s = XLNX_AXIQSPI(reg->opaque);
 883    uint32_t val = (uint64_t) val64;
 884
 885    /* This field should not be written to if we do not have FIFOs */
 886    if (!s->conf.fifo_depth && FIELD_EX64(val64, IPISR, TXFIFO_HALF_EMPTY)) {
 887        qemu_log_mask(LOG_GUEST_ERROR, "axiqspi: Attempted to write to "
 888                      "TXFIFO_HALF_EMPTY with no FIFOs enabled\n");
 889        val = FIELD_DP32(val, IPISR, TXFIFO_HALF_EMPTY, 0);
 890    }
 891
 892    /* IPISR bits are toggled on write */
 893    return s->regs[R_IPISR] ^= val;
 894}
 895
 896static uint64_t axiqspi_ipisr_post_read(RegisterInfo *reg, uint64_t val64)
 897{
 898    XlnxAXIQSPI *s = XLNX_AXIQSPI(reg->opaque);
 899    uint32_t val = (uint64_t) val64;
 900
 901    if (axiqspi_tx_fifo_half_empty(s)) {
 902        val = FIELD_DP32(val, IPISR, TXFIFO_HALF_EMPTY, 1);
 903    }
 904
 905    return val;
 906}
 907
 908static void axiqspi_update_cs_lines(XlnxAXIQSPI *s)
 909{
 910    uint32_t cs = s->regs[R_SPI_SSR];
 911
 912    for (uint8_t i = 0; i < s->conf.num_cs; ++i) {
 913        uint8_t curr_cs = (cs >> i) & 1;
 914        DB_PRINT("axiqspi: cs[%.2d]=%d\n", i, curr_cs);
 915        qemu_set_irq(s->cs_lines[i], curr_cs);
 916    }
 917}
 918
 919static void axiqspi_rx_push_fifo_update(XlnxAXIQSPI *s, uint32_t data)
 920{
 921    bool fifo_was_empty = fifo_is_empty(&s->rx_fifo);
 922
 923    fifo_push32(&s->rx_fifo, data);
 924
 925    /* After pushing the data, update the registers */
 926    if (fifo_was_empty) {
 927        ARRAY_FIELD_DP32(s->regs, SPISR, RX_EMPTY, 0);
 928        if (s->conf.mode == AXIQSPI_MODE_STD &&
 929            !ARRAY_FIELD_EX32(s->regs, SPICR, MASTER)) {
 930            ARRAY_FIELD_DP32(s->regs, IPISR, DRR_NOT_EMPTY, 1);
 931        }
 932    }
 933    if (fifo_is_full(&s->rx_fifo)) {
 934        ARRAY_FIELD_DP32(s->regs, IPISR, DRR_FULL, 1);
 935        ARRAY_FIELD_DP32(s->regs, SPISR, RX_FULL, 1);
 936    }
 937}
 938
 939static void axiqspi_rx_push(XlnxAXIQSPI *s, uint32_t data)
 940{
 941    if (!ARRAY_FIELD_EX32(s->regs, SPISR, RX_FULL)) {
 942        if (s->conf.fifo_depth) {
 943            axiqspi_rx_push_fifo_update(s, data);
 944        } else {
 945            s->regs[R_SPI_DRR] = data;
 946
 947            /* After pushing the data, update the registers */
 948            ARRAY_FIELD_DP32(s->regs, IPISR, DRR_FULL, 1);
 949            ARRAY_FIELD_DP32(s->regs, SPISR, RX_FULL, 1);
 950            ARRAY_FIELD_DP32(s->regs, SPISR, RX_EMPTY, 0);
 951        }
 952    } else {
 953        ARRAY_FIELD_DP32(s->regs, IPISR, DRR_OVERRUN, 1);
 954    }
 955
 956    axiqspi_update_irq(s);
 957}
 958
 959static void axiqspi_tx_push_fifo_update(XlnxAXIQSPI *s, uint32_t val)
 960{
 961    if (ARRAY_FIELD_EX32(s->regs, SPISR, TX_EMPTY)) {
 962        ARRAY_FIELD_DP32(s->regs, SPISR, TX_EMPTY, 0);
 963    }
 964
 965    fifo_push32(&s->tx_fifo, val);
 966    /* After pushing the data, update our regs accordingly */
 967    if (fifo_is_full(&s->tx_fifo)) {
 968        ARRAY_FIELD_DP32(s->regs, SPISR, TX_FULL, 1);
 969    }
 970}
 971
 972static void axiqspi_tx_push(XlnxAXIQSPI *s, uint32_t val)
 973{
 974    if (!ARRAY_FIELD_EX32(s->regs, SPISR, TX_FULL)) {
 975        /*
 976         * If the user is trying to write data wider than the
 977         * transaction width, truncate the bits and warn them.
 978         */
 979        if (!axiqspi_transaction_width_ok(s, val)) {
 980            qemu_log_mask(LOG_GUEST_ERROR, "axiqspi: Value %d is wider than "
 981                          "the transaction width, truncating it.\n", val);
 982            val &= s->conf.tx_width_mask;
 983        }
 984
 985        if (s->conf.fifo_depth) {
 986            axiqspi_tx_push_fifo_update(s, val);
 987        } else {
 988            s->regs[R_SPI_DTR] = val;
 989
 990            /* After pushing the data, update our regs */
 991            ARRAY_FIELD_DP32(s->regs, SPISR, TX_EMPTY, 0);
 992            ARRAY_FIELD_DP32(s->regs, SPISR, TX_FULL, 1);
 993        }
 994    } else {
 995        qemu_log_mask(LOG_GUEST_ERROR, "axiqspi: Attempted to write to SPI_DTR "
 996                      "when it was full\n");
 997    }
 998}
 999
/*
 * Pop the next value to transmit into @data.
 *
 * Returns true when real data was available. On underrun while not a
 * master, *data is set to 0 and the underrun interrupt is raised; when
 * a master has no data, *data is left untouched and false is returned.
 */
static bool axiqspi_tx_pop(XlnxAXIQSPI *s, uint32_t *data)
{
    bool had_data = false;

    if (s->conf.fifo_depth) {
        if (!fifo_is_empty(&s->tx_fifo)) {
            *data = fifo_pop32(&s->tx_fifo);
            had_data = true;

            /* After we get data, update our registers accordingly */
            if (ARRAY_FIELD_EX32(s->regs, SPISR, TX_FULL)) {
                ARRAY_FIELD_DP32(s->regs, SPISR, TX_FULL, 0);
            }
            if (fifo_is_empty(&s->tx_fifo)) {
                ARRAY_FIELD_DP32(s->regs, SPISR, TX_EMPTY, 1);
                ARRAY_FIELD_DP32(s->regs, IPISR, DTR_EMPTY, 1);
            }
        } else {
            /*
             * In an underrun condition, the core should transmit zeroes.
             * An underrun can only occur when the core is not a master.
             */
            if (!ARRAY_FIELD_EX32(s->regs, SPICR, MASTER)) {
                *data = 0;

                ARRAY_FIELD_DP32(s->regs, IPISR, DTR_UNDERRUN, 1);
            }
        }
    } else {
        /* FIFO-less configuration: SPI_DTR is a single-entry buffer. */
        if (!ARRAY_FIELD_EX32(s->regs, SPISR, TX_EMPTY)) {
            *data = s->regs[R_SPI_DTR];
            had_data = true;

            /* Register and interrupt updating */
            ARRAY_FIELD_DP32(s->regs, SPISR, TX_EMPTY, 1);
            ARRAY_FIELD_DP32(s->regs, SPISR, TX_FULL, 0);
            ARRAY_FIELD_DP32(s->regs, IPISR, DTR_EMPTY, 1);
        } else {
            /*
             * In an underrun condition, the core should transmit zeroes.
             * An underrun can only occur when the core is not a master.
             */
            if (!ARRAY_FIELD_EX32(s->regs, SPICR, MASTER)) {
                *data = 0;

                ARRAY_FIELD_DP32(s->regs, IPISR, DTR_UNDERRUN, 1);
            }
        }
    }

    axiqspi_update_irq(s);

    return had_data;
}
1054
1055/*
1056 * We need to keep track of whether the device is in 3 or 4-byte addressing.
1057 * These functions look at what byte is being transmitted for the
1058 * current command and they check the extended addressing bit.
1059 */
1060static void axiqspi_sp_addressing_update(XlnxAXIQSPI *s, uint32_t data)
1061{
1062    if (s->cmd == BANK_REGISTER_WRITE) {
1063        if (data & SP_EXTADDR_BIT) {
1064            s->is_4b_addressing = true;
1065        } else {
1066            s->is_4b_addressing = false;
1067        }
1068    }
1069}
1070
1071static void axiqspi_mi_addressing_update(XlnxAXIQSPI *s, uint32_t data)
1072{
1073    if (s->cmd == WRITE_NONVOLATILE_CONFIG_REG) {
1074        if ((s->bytes_txed == 0) && (data & MI_EXTADDR_BIT)) {
1075            s->is_4b_addressing = true;
1076        } else {
1077            s->is_4b_addressing = false;
1078        }
1079    } else if (s->cmd == ENTER_4B_ADDR_MODE) {
1080        s->is_4b_addressing = true;
1081    } else if (s->cmd == EXIT_4B_ADDR_MODE) {
1082        s->is_4b_addressing = false;
1083    }
1084}
1085
1086static void axiqspi_mx_addressing_update(XlnxAXIQSPI *s, uint32_t data)
1087{
1088    if (s->cmd == WRITE_STATUS_REG) {
1089        if ((s->bytes_txed == 1) && (data & MA_EXTADDR_BIT)) {
1090            s->is_4b_addressing = true;
1091        } else {
1092            s->is_4b_addressing = false;
1093        }
1094    } else if (s->cmd == ENTER_4B_ADDR_MODE) {
1095        s->is_4b_addressing = true;
1096    } else if (s->cmd == EXIT_4B_ADDR_MODE) {
1097        s->is_4b_addressing = false;
1098    }
1099}
1100
1101static void axiqspi_addressing_update(XlnxAXIQSPI *s, uint32_t data)
1102{
1103    switch (s->conf.spi_mem) {
1104    case SPI_MEM_SPANSION:
1105        axiqspi_sp_addressing_update(s, data);
1106
1107        break;
1108    case SPI_MEM_MICRON:
1109        axiqspi_mi_addressing_update(s, data);
1110
1111        break;
1112    case SPI_MEM_MACRONIX:
1113        axiqspi_mx_addressing_update(s, data);
1114
1115        break;
1116    default:
1117        break;
1118    }
1119}
1120
/*
 * Return @data with its low @bit_width bits reversed (bit 0 swaps with
 * bit (bit_width - 1), and so on).  Used to model LSB_FIRST transfers.
 * Bits at or above @bit_width are cleared in the result.
 *
 * Note: the swap is done by building the result in a fresh accumulator.
 * OR-ing bits into @data in place cannot work, because a set bit would
 * be mirrored into both positions instead of moved.
 */
static uint32_t bit_rev(uint32_t data, uint8_t bit_width)
{
    uint32_t result = 0;

    for (uint8_t i = 0; i < bit_width; ++i) {
        if (data & (1u << i)) {
            result |= 1u << (bit_width - 1 - i);
        }
    }

    return result;
}
1131
1132static void axiqspi_state_reset(XlnxAXIQSPI *s)
1133{
1134    s->bytes_txed = 0;
1135    s->addr_bytes_txed = 0;
1136    s->dummy_bytes_txed = 0;
1137    s->state = AXIQSPI_STATE_STOPPED;
1138    s->link_state = LINK_STATE_SINGLE;
1139}
1140
1141static void axiqspi_bus_txrx(XlnxAXIQSPI *s, uint32_t data)
1142{
1143    if (ARRAY_FIELD_EX32(s->regs, SPICR, LSB_FIRST)) {
1144        data = bit_rev(data, s->conf.transaction_width);
1145    }
1146
1147    if (ARRAY_FIELD_EX32(s->regs, SPICR, LOOP) &&
1148        s->conf.mode == AXIQSPI_MODE_STD) {
1149        DB_PRINT("axiqspi: TXRX loopback 0x%x\n", data);
1150    } else {
1151        DB_PRINT("axiqspi: TX->0x%x\n", data);
1152        data = ssi_transfer(s->spi_bus, data);
1153        DB_PRINT("axiqspi: RX->0x%x\n", data);
1154    }
1155    axiqspi_rx_push(s, data);
1156}
1157
1158static bool axiqspi_bus_tx_cmd(XlnxAXIQSPI *s)
1159{
1160    bool done;
1161    uint32_t data;
1162
1163    if (axiqspi_tx_pop(s, &data)) {
1164        /*
1165         * Internal logic, such as the number of dummy bytes and
1166         * addressing bytes, are determined when parsing the command.
1167         */
1168        if (axiqspi_parse_cmd(s, data & 0xFF)) {
1169            DB_PRINT("axiqspi: New command 0x%x\n", data);
1170            axiqspi_bus_txrx(s, data);
1171
1172            if (s->addr_bytes) {
1173                s->state = AXIQSPI_STATE_TX_ADDR;
1174            } else if (s->num_dummies) {
1175                s->state = AXIQSPI_STATE_TX_DUMMIES;
1176            } else {
1177                s->state = AXIQSPI_STATE_TX;
1178            }
1179        } else {
1180            ARRAY_FIELD_DP32(s->regs, IPISR, COMMAND_ERROR, 1);
1181            ARRAY_FIELD_DP32(s->regs, SPISR, COMMAND_ERROR, 1);
1182
1183            qemu_log_mask(LOG_GUEST_ERROR,
1184                          "axiqspi: Unknown command 0x%x\n", data);
1185        }
1186
1187        done = false;
1188    } else {
1189        done = true;
1190    }
1191
1192    return done;
1193}
1194
1195static bool axiqspi_bus_tx_addr(XlnxAXIQSPI *s)
1196{
1197    bool done;
1198    uint32_t data;
1199
1200    if (axiqspi_tx_pop(s, &data)) {
1201        axiqspi_bus_txrx(s, data);
1202        ++s->addr_bytes_txed;
1203
1204        if (s->addr_bytes_txed == s->addr_bytes) {
1205            if (s->num_dummies) {
1206                s->state = AXIQSPI_STATE_TX_DUMMIES;
1207            } else {
1208                s->state = AXIQSPI_STATE_TX;
1209            }
1210        }
1211
1212        done = false;
1213    } else {
1214        done = true;
1215    }
1216
1217    return done;
1218}
1219
/*
 * TX_DUMMIES state handler: transmit the command's dummy bytes.
 * Returns true when the TX path ran out of data before all dummies
 * were sent (the state then stays at TX_DUMMIES for the next kick).
 */
static bool axiqspi_bus_tx_dummies(XlnxAXIQSPI *s)
{
    uint32_t dummy;
    bool had_data = false;
    int i;

    DB_PRINT("axiqspi: dummy byte TX start\n");
    for (i = s->dummy_bytes_txed; i < s->num_dummies; ++i) {
        had_data = axiqspi_tx_pop(s, &dummy);
        if (!had_data) {
            break;
        }
        axiqspi_bus_txrx(s, dummy);
        ++s->dummy_bytes_txed;

        /*
         * Transmit extra dummy bytes depending on the link state.
         * These dummy bytes are not put into the RXFIFO.
         * NOTE(review): the debug print below says 0x0 but the popped
         * `dummy` value is what is actually re-sent — confirm which is
         * intended.
         */
        for (uint8_t j = 1; j < 8 / s->link_state; ++j) {
            DB_PRINT("axiqspi: TX->0x0\n");
            ssi_transfer(s->spi_bus, dummy);
        }
    }
    DB_PRINT("axiqspi: dummy byte TX end\n");

    /* All dummies out: the data phase begins. */
    if (s->dummy_bytes_txed == s->num_dummies) {
        s->state = AXIQSPI_STATE_TX;
    }

    return !had_data;
}
1252
1253static bool axiqspi_bus_tx_data(XlnxAXIQSPI *s)
1254{
1255    bool done;
1256    uint32_t data;
1257
1258    if (axiqspi_tx_pop(s, &data)) {
1259        if (s->is_addr_change_cmd) {
1260            axiqspi_addressing_update(s, data);
1261        }
1262        axiqspi_bus_txrx(s, data);
1263        ++s->bytes_txed;
1264
1265        done = false;
1266    } else {
1267        done = true;
1268    }
1269
1270    return done;
1271}
1272
/*
 * Drain the TX path onto the SPI bus, driving the per-command state
 * machine (command -> address -> dummies -> data) until there is no
 * more data to send.  No-op when the engine is stopped.
 */
static void axiqspi_bus_tx(XlnxAXIQSPI *s)
{
    bool done = false;
    bool auto_ss = !ARRAY_FIELD_EX32(s->regs, SPICR,
                                     MANUAL_SLAVE_SELECT_ASSERTION_ENABLE);
    bool ss_asserted = false;

    if (s->state == AXIQSPI_STATE_STOPPED) {
        return;
    }

    /* SPI_SSR all-ones means every (active-low) slave select is idle. */
    if (!(~s->regs[R_SPI_SSR])) {
        qemu_log_mask(LOG_GUEST_ERROR, "axiqspi: Transmitting with no SS "
                      "asserted.\n");
    }

    while (!done) {
        /* In automatic SS mode, assert CS once at the start of the burst. */
        if (auto_ss && !ss_asserted) {
            axiqspi_update_cs_lines(s);
            ss_asserted = true;
        }

        switch (s->state) {
        /*
         * The core will stay in the TX_CMD state until it
         * receives a valid command.
         * Note that in standard SPI mode commands aren't checked,
         * meaning that all commands are valid.
         */
        case AXIQSPI_STATE_TX_CMD:
            done = axiqspi_bus_tx_cmd(s);
            break;
        /*
         * The core stays in the TX_ADDR state until it's transmitted all of
         * the address bytes for the particular command
         */
        case AXIQSPI_STATE_TX_ADDR:
            done = axiqspi_bus_tx_addr(s);
            break;
        /* Transmit any needed dummy bytes */
        case AXIQSPI_STATE_TX_DUMMIES:
            done = axiqspi_bus_tx_dummies(s);
            break;
        /*
         * The core will stay in the transferring state until
         * either SPISSR is all 1s or SPI is disabled
         */
        case AXIQSPI_STATE_TX:
            done = axiqspi_bus_tx_data(s);
            break;
        default:
            g_assert_not_reached();
        }
    }

    /*
     * In automatic SS mode, de-assert every CS line at the end of the
     * burst by momentarily forcing SPI_SSR to all-ones, then restoring
     * the guest-visible value.
     */
    if (auto_ss) {
        uint32_t old_ss = s->regs[R_SPI_SSR];
        s->regs[R_SPI_SSR] = 0xFFFFFFFF;
        axiqspi_update_cs_lines(s);
        s->regs[R_SPI_SSR] = old_ss;
    }

    axiqspi_update_irq(s);
}
1337
1338static void axiqspi_do_reset(XlnxAXIQSPI *s)
1339{
1340    fifo_reset(&s->tx_fifo);
1341    fifo_reset(&s->rx_fifo);
1342
1343    for (size_t i = 0; i < ARRAY_SIZE(s->reg_info); ++i) {
1344        register_reset(&s->reg_info[i]);
1345    }
1346
1347    qemu_set_irq(s->irq, 0);
1348    axiqspi_update_cs_lines(s);
1349}
1350
1351static void axiqspi_xip_do_reset(XlnxAXIQSPI *s)
1352{
1353    fifo_reset(&s->rx_fifo);
1354
1355    for (size_t i = 0; i < ARRAY_SIZE(s->reg_info); ++i) {
1356        register_reset(&s->reg_info[i]);
1357    }
1358}
1359
1360static void axiqspi_reset(DeviceState *dev)
1361{
1362    XlnxAXIQSPI *s = XLNX_AXIQSPI(dev);
1363
1364    if (s->conf.xip_mode) {
1365        axiqspi_xip_do_reset(s);
1366    } else {
1367        axiqspi_do_reset(s);
1368    }
1369}
1370
1371static uint64_t axiqspi_srr_pre_write(RegisterInfo *reg, uint64_t val64)
1372{
1373    XlnxAXIQSPI *s = XLNX_AXIQSPI(reg->opaque);
1374    uint32_t val = (uint32_t) val64;
1375
1376    if (val == AXIQSPI_SRR_RESET_VAL) {
1377        DB_PRINT("axiqspi: SPISRR Reset\n");
1378        axiqspi_do_reset(s);
1379    } else {
1380        qemu_log_mask(LOG_GUEST_ERROR,
1381                      "axiqspi: %d written to SRR, not resetting.\n", val);
1382    }
1383
1384    return 0;
1385}
1386
1387static uint64_t axiqspi_spi_ssr_pre_write(RegisterInfo *reg, uint64_t val64)
1388{
1389    XlnxAXIQSPI *s = XLNX_AXIQSPI(reg->opaque);
1390    uint32_t val = (uint32_t) val64;
1391
1392    /* Bounds check the write */
1393    if (~val & ~s->conf.num_cs_mask) {
1394        qemu_log_mask(LOG_GUEST_ERROR, "axiqspi: Attempted to write %x to "
1395                      "SPI_SSR, when the valid bits are 0x%x. "
1396                      "Truncating extra bits.\n",
1397                      val, s->conf.num_cs_mask);
1398
1399        val |= ~s->conf.num_cs_mask;
1400    }
1401
1402    s->prev_ss = s->regs[R_SPI_SSR];
1403
1404    return val;
1405}
1406
/*
 * SPI_SSR write side effects: drive the CS GPIO lines (in manual SS
 * mode) and start or stop the transfer state machine on SS edges.
 */
static void axiqspi_spi_ssr_post_write(RegisterInfo *reg, uint64_t val64)
{
    XlnxAXIQSPI *s = XLNX_AXIQSPI(reg->opaque);
    uint32_t ss_noninv;
    uint32_t ssr = (uint32_t) val64;
    bool manual_ss = ARRAY_FIELD_EX32(s->regs, SPICR,
                                      MANUAL_SLAVE_SELECT_ASSERTION_ENABLE);

    /* SPI_SSR is inverted, uninverting it makes bit manipulation easier. */
    ss_noninv = ~ssr;
    if (ss_noninv & s->conf.num_cs_mask) {
        /* Only one SS line should be low at a time */
        if ((ss_noninv & (ss_noninv - 1)) != 0) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "axiqspi: SPI_SSR has multiple SS lines high. "
                          "SPI_SSR contains 0x%x\n", ssr);
        }

        /*
         * Any change of the asserted SS line(s) relative to the previous
         * SPI_SSR value is treated as the start of a new transaction.
         */
        if ((ssr & s->conf.num_cs_mask) !=
            (s->prev_ss & s->conf.num_cs_mask)) {
            DB_PRINT("axiqspi: New SS asserted\n");
            if (manual_ss) {
                axiqspi_update_cs_lines(s);
            }

            /* Enabled, uninhibited master: begin expecting a command. */
            if (ARRAY_FIELD_EX32(s->regs, SPICR, SPE) &&
                !ARRAY_FIELD_EX32(s->regs, SPICR, MASTER_TRANSACTION_INHIBIT) &&
                ARRAY_FIELD_EX32(s->regs, SPICR, MASTER)) {
                DB_PRINT("axiqspi: State changed to transmitting\n");

                s->state = AXIQSPI_STATE_TX_CMD;
            }
        }
    } else {
        /* If an SS was un-asserted, update the CS lines */
        if (ssr != s->prev_ss) {
            DB_PRINT("axiqspi: SS un-asserted\n");
            if (manual_ss) {
                axiqspi_update_cs_lines(s);
            }
        }
        /* All of the SS lines are low, so if the core is transmitting, stop. */
        if (s->state != AXIQSPI_STATE_STOPPED) {
            axiqspi_state_reset(s);
        }
    }
}
1458
/*
 * SPICR write handler: service the self-clearing FIFO reset bits and
 * kick the transmit engine when this write enables transmission.
 * Note the old SPICR value is still in s->regs here; the "will_*"
 * booleans describe the incoming value.
 */
static uint64_t axiqspi_spicr_pre_write(RegisterInfo *reg, uint64_t val64)
{
    XlnxAXIQSPI *s = XLNX_AXIQSPI(reg->opaque);
    uint32_t val = val64;
    bool inhib_set = ARRAY_FIELD_EX32(s->regs, SPICR,
                                      MASTER_TRANSACTION_INHIBIT);
    bool will_clear_inhib = FIELD_EX32(val, SPICR,
                                       MASTER_TRANSACTION_INHIBIT) == 0;
    bool inhib_cleared = inhib_set && will_clear_inhib;
    bool spe_set = ARRAY_FIELD_EX32(s->regs, SPICR, SPE);
    bool will_set_spe = FIELD_EX32(val, SPICR, SPE);
    bool spe_now_set = !spe_set && will_set_spe;

    /* FIFO reset: both bits are write-one-to-trigger and self-clearing. */
    if (FIELD_EX32(val, SPICR, RX_FIFO_RESET)) {
        fifo_reset(&s->rx_fifo);
        val = FIELD_DP32(val, SPICR, RX_FIFO_RESET, 0);
    }
    if (FIELD_EX32(val, SPICR, TX_FIFO_RESET)) {
        fifo_reset(&s->tx_fifo);
        val = FIELD_DP32(val, SPICR, TX_FIFO_RESET, 0);
    }
    /*
     * If MASTER_TRANSACTION_INHIBIT is cleared and SPE is set, we start
     * transmitting.
     * This logic is set up to avoid starting a transfer on every SPICR write,
     * unless the write modifies SPE or MASTER_TRANSACTION_INHIBIT.
     */
    if (ARRAY_FIELD_EX32(s->regs, SPICR, MASTER)) {
        if (inhib_cleared && spe_set) {
            axiqspi_bus_tx(s);
        } else if (spe_now_set && !inhib_set) {
            axiqspi_bus_tx(s);
        } else if (spe_now_set && inhib_cleared) {
            axiqspi_bus_tx(s);
        }
    }

    return val;
}
1499
1500static void axiqspi_spicr_post_write(RegisterInfo *reg, uint64_t val64)
1501{
1502    XlnxAXIQSPI *s = XLNX_AXIQSPI(reg->opaque);
1503
1504    /* SPI disable */
1505    if (!ARRAY_FIELD_EX32(s->regs, SPICR, SPE)) {
1506        axiqspi_state_reset(s);
1507    }
1508
1509    if (!ARRAY_FIELD_EX32(s->regs, SPICR, MASTER)) {
1510        qemu_log_mask(LOG_GUEST_ERROR,
1511                      "axiqspi: Slave mode is not supported.\n");
1512    }
1513
1514    /* Check possible misconfigurations that need action */
1515    if (s->conf.mode != AXIQSPI_MODE_STD) {
1516        uint8_t cpol = !!(ARRAY_FIELD_EX32(s->regs, SPICR, CPOL));
1517        uint8_t cpha = !!(ARRAY_FIELD_EX32(s->regs, SPICR, CPHA));
1518
1519        if (ARRAY_FIELD_EX32(s->regs, SPICR, LOOP)) {
1520            ARRAY_FIELD_DP32(s->regs, SPISR, LOOPBACK_ERROR, 1);
1521            ARRAY_FIELD_DP32(s->regs, IPISR, LOOPBACK_ERROR, 1);
1522            qemu_log_mask(LOG_GUEST_ERROR, "axiqspi: Cannot do loopback in "
1523                          "dual or quad mode.\n");
1524        }
1525        if (ARRAY_FIELD_EX32(s->regs, SPICR, LSB_FIRST)) {
1526            ARRAY_FIELD_DP32(s->regs, IPISR, MSB_ERROR, 1);
1527            ARRAY_FIELD_DP32(s->regs, SPISR, MSB_ERROR, 1);
1528            qemu_log_mask(LOG_GUEST_ERROR, "axiqspi: Cannot do LSB first in "
1529                          "dual or quad mode.\n");
1530        }
1531        if (!ARRAY_FIELD_EX32(s->regs, SPICR, MASTER)) {
1532            ARRAY_FIELD_DP32(s->regs, IPISR, SLAVE_MODE_ERROR, 1);
1533            ARRAY_FIELD_DP32(s->regs, SPISR, SLAVE_MODE_ERROR, 1);
1534            qemu_log_mask(LOG_GUEST_ERROR, "axiqspi: Cannot do slave mode in "
1535                          "dual or quad mode.\n");
1536        }
1537        if ((!cpol && cpha) || (cpol && !cpha)) {
1538            ARRAY_FIELD_DP32(s->regs, IPISR, CPOL_CPHA_ERROR, 1);
1539            ARRAY_FIELD_DP32(s->regs, SPISR, CPOL_CPHA_ERROR, 1);
1540            qemu_log_mask(LOG_GUEST_ERROR, "axiqspi: CPOL and CPHA error.\n");
1541        }
1542    }
1543
1544    axiqspi_update_irq(s);
1545}
1546
1547static uint64_t axiqspi_spi_dtr_pre_write(RegisterInfo *reg, uint64_t val64)
1548{
1549    XlnxAXIQSPI *s = XLNX_AXIQSPI(reg->opaque);
1550    uint32_t val = (uint32_t)val64;
1551
1552    axiqspi_tx_push(s, val);
1553
1554    /* If SPI and TX are enabled, TX any stored data */
1555    if (ARRAY_FIELD_EX32(s->regs, SPICR, SPE)) {
1556        if (!ARRAY_FIELD_EX32(s->regs, SPICR, MASTER) ||
1557            (ARRAY_FIELD_EX32(s->regs, SPICR, MASTER) &&
1558            !ARRAY_FIELD_EX32(s->regs, SPICR, MASTER_TRANSACTION_INHIBIT))) {
1559                axiqspi_bus_tx(s);
1560        }
1561    }
1562
1563    return val;
1564}
1565
1566static void axiqspi_dgier_post_write(RegisterInfo *reg, uint64_t val64)
1567{
1568    XlnxAXIQSPI *s = XLNX_AXIQSPI(reg->opaque);
1569
1570    axiqspi_update_irq(s);
1571}
1572
1573static uint64_t axiqspi_spi_rxfifo_or_post_read(RegisterInfo *reg,
1574                                                uint64_t val64)
1575{
1576    XlnxAXIQSPI *s = XLNX_AXIQSPI(reg->opaque);
1577
1578    if (!s->conf.fifo_depth) {
1579        qemu_log_mask(LOG_GUEST_ERROR, "Attempted to read RXFIFO_OR when FIFO "
1580                      "is disabled\n");
1581        return 0;
1582    }
1583
1584    if (fifo_is_empty(&s->rx_fifo)) {
1585        return 0;
1586    }
1587
1588    return fifo_num_used(&s->rx_fifo) - 1;
1589}
1590
1591static uint64_t axiqspi_spi_txfifo_or_post_read(RegisterInfo *reg,
1592                                                uint64_t val64)
1593{
1594    XlnxAXIQSPI *s = XLNX_AXIQSPI(reg->opaque);
1595
1596    if (!s->conf.fifo_depth) {
1597        qemu_log_mask(LOG_GUEST_ERROR, "Attempted to read TXFIFO_OR when FIFO "
1598                      "is disabled\n");
1599        return 0;
1600    }
1601
1602    if (fifo_is_empty(&s->tx_fifo)) {
1603        return 0;
1604    }
1605
1606    return fifo_num_used(&s->tx_fifo) - 1;
1607}
1608
/* Register map and access hooks for the normal (non-XIP) register set. */
static const RegisterAccessInfo axiqspi_regs_info[] = {
    {   .name = "DGIER",  .addr = A_DGIER,
        .rsvd = 0x7fffffff,
        .post_write = axiqspi_dgier_post_write,
    },{ .name = "IPISR",  .addr = A_IPISR,
        .rsvd = 0xffffc000,
        .pre_write = axiqspi_ipisr_pre_write,
        .post_read = axiqspi_ipisr_post_read,
    },{ .name = "IPIER",  .addr = A_IPIER,
        .rsvd = 0xffffc000,
    },{ .name = "SRR",  .addr = A_SRR,
        .pre_write = axiqspi_srr_pre_write,
    },{ .name = "SPICR",  .addr = A_SPICR,
        .reset = 0x180,
        .rsvd = 0xfffffc00,
        .pre_write = axiqspi_spicr_pre_write,
        .post_write = axiqspi_spicr_post_write,
    },{ .name = "SPISR",  .addr = A_SPISR,
        .reset = 0xa5,
        .rsvd = 0xfffff800,
        .ro = 0x7ff,
    },{ .name = "SPI_DTR",  .addr = A_SPI_DTR,
        .pre_write = axiqspi_spi_dtr_pre_write
    },{ .name = "SPI_DRR",  .addr = A_SPI_DRR,
        .ro = 0xffffffff,
        .post_read = axiqspi_spi_drr_post_read,
    },{ .name = "SPI_SSR",  .addr = A_SPI_SSR,
        .reset = 0xffffffff,
        .pre_write = axiqspi_spi_ssr_pre_write,
        .post_write = axiqspi_spi_ssr_post_write,
    },{ .name = "SPI_TXFIFO_OR",  .addr = A_SPI_TXFIFO_OR,
        .ro = 0xffffffff,
        .post_read = axiqspi_spi_txfifo_or_post_read,
    },{ .name = "SPI_RXFIFO_OR",  .addr = A_SPI_RXFIFO_OR,
        .ro = 0xffffffff,
        .post_read = axiqspi_spi_rxfifo_or_post_read,
    }
};
1647
1648static bool axiqspi_xip_check_cpol_cpha(XlnxAXIQSPI *s)
1649{
1650    bool cpol = !!(ARRAY_FIELD_EX32(s->regs, XIP_CONFIG_REG, CPOL));
1651    bool cpha = !!(ARRAY_FIELD_EX32(s->regs, XIP_CONFIG_REG, CPHA));
1652
1653    return (cpol && cpha) || (!cpol && !cpha);
1654}
1655
1656static void axiqspi_xip_spicr_post_write(RegisterInfo *reg, uint64_t val64)
1657{
1658    XlnxAXIQSPI *s = XLNX_AXIQSPI(reg->opaque);
1659
1660    if (!axiqspi_xip_check_cpol_cpha(s)) {
1661        qemu_log_mask(LOG_GUEST_ERROR, "axiqspi: CPOL and CPHA error.\n");
1662        ARRAY_FIELD_DP32(s->regs, XIP_STATUS_REG, CPOL_CPHA_ERROR, 1);
1663    }
1664}
1665
1666static uint64_t axiqspi_xip_spisr_post_read(RegisterInfo *reg, uint64_t val64)
1667{
1668    XlnxAXIQSPI *s = XLNX_AXIQSPI(reg->opaque);
1669
1670    /* XIP SPISR is brought to reset after every read */
1671    register_reset(&s->reg_info[R_XIP_STATUS_REG]);
1672
1673    return val64;
1674}
1675
/* Register map and access hooks for the XIP-mode register set. */
static const RegisterAccessInfo axiqspi_xip_regs_info[] = {
    {   .name = "XIP_CONFIG_REG",  .addr = A_XIP_CONFIG_REG,
        .rsvd = 0xfffffffc,
        .post_write = axiqspi_xip_spicr_post_write,
    },{ .name = "XIP_STATUS_REG",  .addr = A_XIP_STATUS_REG,
        .reset = 0x1,
        .rsvd = 0xffffff70,
        .ro = 0x1f,
        .post_read = axiqspi_xip_spisr_post_read,
    }
};
1687
/* Register-block MMIO ops: 32-bit-only, little-endian accesses. */
static const MemoryRegionOps axiqspi_ops = {
    .read = register_read_memory,
    .write = register_write_memory,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
1697
/*
 * Perform one complete XIP flash transaction on the bus:
 * assert CS0, send the current command (s->cmd), then the address bytes
 * (big-endian, s->addr_bytes wide), then any dummy bytes, then clock in
 * @size bytes of data into @val, and finally de-assert CS0.
 */
static void axiqspi_xip_txrx(XlnxAXIQSPI *s, hwaddr addr, uint8_t *val,
                             unsigned size)
{
    qemu_set_irq(s->cs_lines[0], 0);
    ssi_transfer(s->spi_bus, s->cmd);
    DB_PRINT("axiqspi: XIP TX->0x%x\n", s->cmd);

    /* Address goes out most-significant byte first. */
    for (int i = s->addr_bytes - 1; i >= 0; --i) {
        uint8_t shift = i * 8;
        uint8_t addr_byte = (addr >> shift) & 0xFF;
        ssi_transfer(s->spi_bus, addr_byte);
        DB_PRINT("axiqspi: XIP TX->0x%x\n", addr_byte);
    }

    /*
     * If a command has dummy bytes, We TX 8 dummy bytes regardless of
     * command and link state.
     */
    if (s->num_dummies) {
        for (uint8_t i = 0; i < 8; ++i) {
            ssi_transfer(s->spi_bus, 0x00);
            DB_PRINT("axiqspi: XIP TX->0x00 dummy\n");
        }
    }

    /* Clock out zeroes to read the data phase back. */
    for (uint8_t i = 0; i < size; ++i) {
        val[i] = ssi_transfer(s->spi_bus, 0x00);
        DB_PRINT("axiqspi: XIP RX->0x%x\n", val[i]);
    }
    qemu_set_irq(s->cs_lines[0], 1);
}
1729
/*
 * One-time XIP setup for Winbond memories: issue HIGH_PERFORMANCE_MODE
 * with no address or data bytes.  Presumably required before dual/quad
 * reads on these parts — called only for SPI_MEM_WINBOND (see
 * axiqspi_xip_read).
 */
static void axiqspi_xip_wb_init(XlnxAXIQSPI *s)
{
    axiqspi_parse_cmd(s, HIGH_PERFORMANCE_MODE);

    axiqspi_xip_txrx(s, 0, NULL, 0);
}
1736
/*
 * XIP-region read: translate an AXI read at @addr into a flash fast-read
 * command on the SPI bus.  The command opcode depends on the core's SPI
 * mode (standard/dual/quad) and on whether 32-bit (4-byte) addressing is
 * in effect.  Fails with MEMTX_ERROR when CPOL/CPHA are misconfigured.
 */
static MemTxResult axiqspi_xip_read(void *opaque, hwaddr addr, uint64_t *val,
                                    unsigned size, MemTxAttrs attrs)
{
    XlnxAXIQSPI *s = XLNX_AXIQSPI(opaque);
    uint8_t cmd;
    /* Dedicated 4-byte opcodes only exist for Spansion/Micron parts. */
    bool is_4b_cmd = (s->conf.spi_mem == SPI_MEM_SPANSION ||
                      s->conf.spi_mem == SPI_MEM_MICRON) &&
                      s->conf.xip_addr_bits == 32;

    assert(size <= 64);

    if (!axiqspi_xip_check_cpol_cpha(s)) {
        qemu_log_mask(LOG_GUEST_ERROR, "axiqspi: attempted to read with "
                      "bad CPOL and CPHA.\n");
        return MEMTX_ERROR;
    }

    /* These read commands are universal between all supported flashes */
    switch (s->conf.mode) {
    case AXIQSPI_MODE_STD:
        cmd = is_4b_cmd ? FAST_READ_4B : FAST_READ;
        break;
    case AXIQSPI_MODE_DUAL:
        cmd = is_4b_cmd ? FAST_READ_DUAL_IO_4B : FAST_READ_DUAL_IO;
        break;
    case AXIQSPI_MODE_QUAD:
        cmd = is_4b_cmd ? FAST_READ_QUAD_IO_4B : FAST_READ_QUAD_IO;
        break;
    default:
        g_assert_not_reached();
    }

    /* Winbond parts get a one-time high-performance-mode init first. */
    if (s->conf.spi_mem == SPI_MEM_WINBOND &&
        s->conf.mode != AXIQSPI_MODE_STD &&
        !s->is_xip_wb_init) {
        axiqspi_xip_wb_init(s);
        s->is_xip_wb_init = true;
    }

    /* Parse to get the number of dummy and addr bytes */
    axiqspi_parse_cmd(s, cmd);

    /*
     * Rather than using the functions we would normally use for AXIQSPI
     * transfers, we will directly transfer things down the bus.
     * This is to avoid having to wrap most of our register accesses with
     * if statements to see if we're in XIP mode or not.
     *
     * Note that we also don't update any RX FIFO status registers.
     * This is because we send data right after we receive it from
     * the SPI flash; meaning the FIFO is always empty.
     */
    axiqspi_xip_txrx(s, addr, (uint8_t *)val, size);

    return MEMTX_OK;
}
1793
1794static MemTxResult axiqspi_xip_write(void *opaque, hwaddr addr, uint64_t val,
1795                                    unsigned size, MemTxAttrs attrs)
1796{
1797    XlnxAXIQSPI *s = XLNX_AXIQSPI(opaque);
1798
1799    /* XIP mode does not allow writes */
1800    qemu_log_mask(LOG_GUEST_ERROR,
1801                  "axiqspi: attempted to write in XIP mode\n");
1802    ARRAY_FIELD_DP32(s->regs, XIP_STATUS_REG, AXI_TRANSACTION_ERROR, 1);
1803
1804    return MEMTX_ERROR;
1805}
1806
/* XIP-region MMIO ops: 1-4 byte little-endian accesses, reads hit flash. */
static const MemoryRegionOps axiqspi_xip_ops = {
    .read_with_attrs = axiqspi_xip_read,
    .write_with_attrs = axiqspi_xip_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
};
1816
1817static bool axiqspi_validate_conf(XlnxAXIQSPI *s, Error **errp)
1818{
1819    /* Checks that must be done in any core configuration */
1820    if (s->conf.mode >= AXIQSPI_MODE_INVALID) {
1821        error_setg(errp, "axiqspi: Unknown SPI mode %d", s->conf.mode);
1822        return false;
1823    }
1824
1825    if (s->conf.num_cs < AXIQSPI_NUM_CS_MIN ||
1826        s->conf.num_cs > AXIQSPI_NUM_CS_MAX) {
1827        error_setg(errp, "axiqspi: Num CS must be between %d and %d.",
1828                   AXIQSPI_NUM_CS_MIN, AXIQSPI_NUM_CS_MAX);
1829        return false;
1830    }
1831
1832    if (s->conf.fifo_depth) {
1833        if (s->conf.xip_mode) {
1834            if (s->conf.fifo_depth != 64) {
1835                error_setg(errp, "axiqspi: FIFO depth must be 64 in XIP mode, "
1836                           "but is %d.", s->conf.fifo_depth);
1837                return false;
1838            }
1839        } else {
1840            if (s->conf.fifo_depth != 16 && s->conf.fifo_depth != 256) {
1841                error_setg(errp, "axiqspi: FIFO depth can only be 16 or 256, "
1842                           "but is %d.", s->conf.fifo_depth);
1843                return false;
1844            }
1845        }
1846    }
1847
1848    if (s->conf.transaction_width != 8 && s->conf.transaction_width != 16 &&
1849        s->conf.transaction_width != 32) {
1850        error_setg(errp, "axiqspi: transaction width must be 8, 16, or "
1851                   "32, but is %d.", s->conf.transaction_width);
1852        return false;
1853    }
1854
1855    /* Sanity checking when the core is in dual or quad mode */
1856    if (s->conf.mode == AXIQSPI_MODE_DUAL ||
1857        s->conf.mode == AXIQSPI_MODE_QUAD) {
1858        if (s->conf.transaction_width != 8) {
1859            error_setg(errp, "axiqspi: Transaction width must be 8 in "
1860                       "dual or quad mode, but is %d.",
1861                       s->conf.transaction_width);
1862            return false;
1863        }
1864
1865        if (!s->conf.fifo_depth) {
1866            error_setg(errp, "axiqspi: Dual and quad mode must use FIFO.");
1867            return false;
1868        }
1869    }
1870
1871    /* If the core is in dual or quad mode, check the memory */
1872    if (s->conf.mode != AXIQSPI_MODE_STD) {
1873        if (s->conf.spi_mem >= SPI_MEM_INVALID) {
1874            error_setg(errp, "axiqspi: Unknown SPI memory %d, "
1875                       "defaulting to mixed memory", s->conf.spi_mem);
1876            s->conf.spi_mem = SPI_MEM_MIXED;
1877        }
1878    }
1879
1880    /* Sanity checking when the core is in XIP mode */
1881    if (s->conf.xip_mode) {
1882        if (s->conf.transaction_width != 8) {
1883            error_setg(errp, "axiqspi: Transaction width must be 8 in "
1884                       "XIP mode, but is %d.", s->conf.transaction_width);
1885            return false;
1886        }
1887
1888        if (s->conf.num_cs != 1) {
1889            error_setg(errp, "axiqspi: Num CS must be 1 in XIP mode, "
1890                       "but is %d.", s->conf.num_cs);
1891            return false;
1892        }
1893
1894        if (s->conf.xip_addr_bits != 24 && s->conf.xip_addr_bits != 32) {
1895            error_setg(errp, "axiqspi: address bits must be 24 or "
1896                       "32 in XIP mode, but is %d.", s->conf.xip_addr_bits);
1897            return false;
1898        }
1899
1900        if (s->conf.xip_addr_bits == 32) {
1901            if (s->conf.spi_mem != SPI_MEM_SPANSION &&
1902                s->conf.spi_mem != SPI_MEM_MICRON) {
1903                error_setg(errp, "axiqspi: XIP 32-bit addressing is only "
1904                           "supported on Spansion or Micron memories.");
1905                return false;
1906
1907            }
1908        }
1909    }
1910
1911    return true;
1912}
1913
1914static void axiqspi_realize(DeviceState *dev, Error **errp)
1915{
1916    XlnxAXIQSPI *s = XLNX_AXIQSPI(dev);
1917    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
1918    RegisterInfoArray *reg_array;
1919
1920    if (!axiqspi_validate_conf(s, errp)) {
1921        return;
1922    }
1923
1924    s->conf.tx_width_mask = MAKE_32BIT_MASK(s->conf.transaction_width);
1925    s->conf.num_cs_mask = MAKE_32BIT_MASK(s->conf.num_cs);
1926
1927    if (s->conf.xip_mode) {
1928        reg_array =
1929            register_init_block32(dev, axiqspi_xip_regs_info,
1930                                  ARRAY_SIZE(axiqspi_xip_regs_info),
1931                                  s->reg_info, s->regs,
1932                                  &axiqspi_ops,
1933                                  XLNX_AXIQSPI_ERR_DEBUG,
1934                                  XLNX_AXIQSPI_R_MAX * 4);
1935    } else {
1936        reg_array =
1937            register_init_block32(dev, axiqspi_regs_info,
1938                                  ARRAY_SIZE(axiqspi_regs_info),
1939                                  s->reg_info, s->regs,
1940                                  &axiqspi_ops,
1941                                  XLNX_AXIQSPI_ERR_DEBUG,
1942                                  XLNX_AXIQSPI_R_MAX * 4);
1943    }
1944
1945    memory_region_add_subregion(&s->iomem, 0x0, &reg_array->mem);
1946    sysbus_init_mmio(sbd, &s->iomem);
1947    sysbus_init_irq(sbd, &s->irq);
1948
1949    /*
1950     * Create FIFOs that hold the largest value possible, rather than
1951     * separate 8/16/32-bit ones that depend on our configuration.
1952     * This simplifies function calling at the cost of space.
1953     */
1954    if (s->conf.fifo_depth) {
1955        fifo_create32(&s->tx_fifo, s->conf.fifo_depth);
1956        fifo_create32(&s->rx_fifo, s->conf.fifo_depth);
1957    }
1958
1959    s->spi_bus = ssi_create_bus(dev, "spi0");
1960    s->cs_lines = g_new0(qemu_irq, s->conf.num_cs);
1961    ssi_auto_connect_slaves(DEVICE(s), s->cs_lines, s->spi_bus);
1962    qdev_init_gpio_out(dev, s->cs_lines, s->conf.num_cs);
1963
1964    DB_PRINT("axiqspi: realized\n");
1965}
1966
1967static void axiqspi_init(Object *obj)
1968{
1969    XlnxAXIQSPI *s = XLNX_AXIQSPI(obj);
1970
1971    /*
1972     * Even though the XIP registers take up much less space than
1973     * non-XIP mode registers, the registers array is the same size.
1974     * Therefore we initialize for the same size in both scenarios.
1975     */
1976    memory_region_init(&s->iomem, obj, TYPE_XLNX_AXIQSPI,
1977                       XLNX_AXIQSPI_R_MAX * 4);
1978}
1979
1980static bool axiqspi_parse_reg(FDTGenericMMap *obj, FDTGenericRegPropInfo reg,
1981                              Error **errp)
1982{
1983    XlnxAXIQSPI *s = XLNX_AXIQSPI(obj);
1984    ObjectClass *klass = object_class_by_name(TYPE_XLNX_AXIQSPI);
1985    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
1986    FDTGenericMMapClass *parent_fmc;
1987
1988    parent_fmc = FDT_GENERIC_MMAP_CLASS(object_class_get_parent(klass));
1989    if (s->conf.xip_mode) {
1990        if (reg.n != 2) {
1991            error_setg(errp, "axiqspi: XIP mode requires 1 region, but "
1992                       "device tree specifies %d regions.", reg.n - 1);
1993            return false;
1994        }
1995
1996        memory_region_init_io(&s->xip_mr, OBJECT(obj), &axiqspi_xip_ops,
1997                              s, "axiqspi-xip-region", reg.s[1]);
1998        sysbus_init_mmio(sbd, &s->xip_mr);
1999    }
2000
2001    return parent_fmc ? parent_fmc->parse_reg(obj, reg, errp) : false;
2002}
2003
/*
 * Migration state: only the raw register array is migrated.
 * NOTE(review): the TX/RX FIFO contents are not in this description, so
 * in-flight FIFO data is presumably lost across migration — confirm this
 * is intentional.
 */
static const VMStateDescription vmstate_axiqspi = {
    .name = TYPE_XLNX_AXIQSPI,
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32_ARRAY(regs, XlnxAXIQSPI, XLNX_AXIQSPI_R_MAX),
        VMSTATE_END_OF_LIST(),
    }
};
2013
/*
 * Device properties mirroring the hardware configuration parameters of
 * the AXI Quad SPI IP. The comment above each property lists the values
 * the hardware supports (enforced by axiqspi_validate_conf at realize).
 */
static Property xlnx_axiqspi_properties[] = {
    /*
     * Valid values: 0-1 (false/true).
     * If XIP mode is enabled, the model expects another <addr, size> pair in
     * the reg parameter in the device tree.
     * This pair is used to create the XIP region.
     */
    DEFINE_PROP_BOOL("xip-mode", XlnxAXIQSPI, conf.xip_mode, false),
    /*
     * Valid values: 24, 32; only used in XIP mode.
     * In non-XIP mode the amount of bits an address has can change, but
     * in XIP mode it must be constant and defined at device creation.
     * (32-bit addressing is only valid with Spansion or Micron memories.)
     */
    DEFINE_PROP_UINT8("spi-mem-addr-bits", XlnxAXIQSPI, conf.xip_addr_bits, 24),
    /* Valid values: 0 (no FIFOs), 16, 256 entries. */
    DEFINE_PROP_UINT16("fifo-size",  XlnxAXIQSPI, conf.fifo_depth, 256),
    /* Valid values: 8, 16, 32 bits per SPI transaction. */
    DEFINE_PROP_UINT8("bits-per-word", XlnxAXIQSPI,
                      conf.transaction_width, 8),
    /*
     * Attached SPI memory vendor, valid values 0-4:
     * 0 = Mixed
     * 1 = Winbond
     * 2 = Micron (Numonyx)
     * 3 = Spansion
     * 4 = Macronix
     */
    DEFINE_PROP_UINT8("spi-memory", XlnxAXIQSPI, conf.spi_mem, 2),
    /*
     * SPI bus width, valid values 0-2:
     * 0 = standard (1 data line)
     * 1 = dual
     * 2 = quad
     */
    DEFINE_PROP_UINT8("spi-mode", XlnxAXIQSPI, conf.mode, 0),
    /* Number of chip-select lines, valid values 1-32. */
    DEFINE_PROP_UINT8("num-ss-bits", XlnxAXIQSPI, conf.num_cs, 1),

    DEFINE_PROP_END_OF_LIST(),
};
2054
2055static void axiqspi_class_init(ObjectClass *klass, void *data)
2056{
2057    DeviceClass *dc = DEVICE_CLASS(klass);
2058    FDTGenericMMapClass *fmc = FDT_GENERIC_MMAP_CLASS(klass);
2059
2060    dc->reset = axiqspi_reset;
2061    device_class_set_props(dc, xlnx_axiqspi_properties);
2062    dc->realize = axiqspi_realize;
2063    dc->vmsd = &vmstate_axiqspi;
2064    fmc->parse_reg = axiqspi_parse_reg;
2065}
2066
/* QOM type registration info: a sysbus device implementing the
 * FDT generic-mmap interface so it can parse its own "reg" property. */
static const TypeInfo axiqspi_info = {
    .name               = TYPE_XLNX_AXIQSPI,
    .parent             = TYPE_SYS_BUS_DEVICE,
    .instance_size      = sizeof(XlnxAXIQSPI),
    .class_init         = axiqspi_class_init,
    .instance_init      = axiqspi_init,
    .interfaces         = (InterfaceInfo[]) {
        { TYPE_FDT_GENERIC_MMAP },
        { },
    },
};
2078
/* Register the AXIQSPI type with QOM at startup. */
static void axiqspi_register_types(void)
{
    type_register_static(&axiqspi_info);
}

type_init(axiqspi_register_types)
2085