qemu/hw/cxl/cxl-component-utils.c
/*
 * CXL Utility library for components
 *
 * Copyright(C) 2020 Intel Corporation.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See the
 * COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "hw/pci/pci.h"
#include "hw/cxl/cxl.h"

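/*
 * Reads are served from the shadow copy held in cache_mem_registers unless
 * the owning device registered special_ops to intercept the access.
 */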
static uint64_t cxl_cache_mem_read_reg(void *opaque, hwaddr offset,
                                       unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;

    if (size == 8) {
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return 0;
    }

    if (cregs->special_ops && cregs->special_ops->read) {
        return cregs->special_ops->read(cxl_cstate, offset, size);
    } else {
        return cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)];
    }
}

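/*
 * Minimal model of HDM decoder commit handling: a write that sets COMMIT is
 * immediately reflected back as COMMITTED with ERR cleared, and a write that
 * clears COMMIT uncommits the decoder. No validation of the decoder
 * programming is attempted.
 */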
static void dumb_hdm_handler(CXLComponentState *cxl_cstate, hwaddr offset,
                             uint32_t value)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t *cache_mem = cregs->cache_mem_registers;
    bool should_commit = false;
    bool should_uncommit = false;

    switch (offset) {
    case A_CXL_HDM_DECODER0_CTRL:
        should_commit = FIELD_EX32(value, CXL_HDM_DECODER0_CTRL, COMMIT);
        should_uncommit = !should_commit;
        break;
    default:
        break;
    }

    if (should_commit) {
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0);
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 1);
    } else if (should_uncommit) {
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, ERR, 0);
        value = FIELD_DP32(value, CXL_HDM_DECODER0_CTRL, COMMITTED, 0);
    }
    stl_le_p((uint8_t *)cache_mem + offset, value);
}

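/*
 * Writes are filtered through cache_mem_regs_write_mask so read-only bits
 * keep their current value, then are either passed to special_ops, routed to
 * the HDM decoder handler, or stored directly to the shadow registers.
 */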
static void cxl_cache_mem_write_reg(void *opaque, hwaddr offset, uint64_t value,
                                    unsigned size)
{
    CXLComponentState *cxl_cstate = opaque;
    ComponentRegisters *cregs = &cxl_cstate->crb;
    uint32_t mask;

    if (size == 8) {
        qemu_log_mask(LOG_UNIMP,
                      "CXL 8 byte cache mem registers not implemented\n");
        return;
    }
    mask = cregs->cache_mem_regs_write_mask[offset / sizeof(*cregs->cache_mem_regs_write_mask)];
    value &= mask;
    /* RO bits should remain constant. Done by reading existing value */
    value |= ~mask & cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)];
    if (cregs->special_ops && cregs->special_ops->write) {
        cregs->special_ops->write(cxl_cstate, offset, value, size);
        return;
    }

    if (offset >= A_CXL_HDM_DECODER_CAPABILITY &&
        offset <= A_CXL_HDM_DECODER0_TARGET_LIST_HI) {
        dumb_hdm_handler(cxl_cstate, offset, value);
    } else {
        cregs->cache_mem_registers[offset / sizeof(*cregs->cache_mem_registers)] = value;
    }
}

/*
 * CXL 2.0, 8.2.3:
 *   The access restrictions specified in Section 8.2.2 also apply to CXL 2.0
 *   Component Registers.
 *
 * CXL 2.0, 8.2.2:
 *   • A 32 bit register shall be accessed as a 4 Bytes quantity. Partial
 *   reads are not permitted.
 *   • A 64 bit register shall be accessed as an 8 Bytes quantity. Partial
 *   reads are not permitted.
 *
 * As the spec is defined today, only 4 byte registers exist.
 */
static const MemoryRegionOps cache_mem_ops = {
    .read = cxl_cache_mem_read_reg,
    .write = cxl_cache_mem_write_reg,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
        .unaligned = false,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

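/*
 * Build the component register block: the (stubbed out) .io region sits at
 * offset 0, with the cache_mem region, backed by cache_mem_ops above, placed
 * directly after it.
 */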
void cxl_component_register_block_init(Object *obj,
                                       CXLComponentState *cxl_cstate,
                                       const char *type)
{
    ComponentRegisters *cregs = &cxl_cstate->crb;

    memory_region_init(&cregs->component_registers, obj, type,
                       CXL2_COMPONENT_BLOCK_SIZE);

    /* The io registers control the link, which QEMU doesn't model */
    memory_region_init_io(&cregs->io, obj, NULL, cregs, ".io",
                          CXL2_COMPONENT_IO_REGION_SIZE);
    memory_region_init_io(&cregs->cache_mem, obj, &cache_mem_ops, cregs,
                          ".cache_mem", CXL2_COMPONENT_CM_REGION_SIZE);

    memory_region_add_subregion(&cregs->component_registers, 0, &cregs->io);
    memory_region_add_subregion(&cregs->component_registers,
                                CXL2_COMPONENT_IO_REGION_SIZE,
                                &cregs->cache_mem);
}

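/*
 * Reset state for the RAS capability: no errors latched, all implemented
 * error bits (0-11 and 14-16 uncorrectable, 0-6 correctable) masked, with
 * the mask and severity registers guest writable in those same bits.
 */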
static void ras_init_common(uint32_t *reg_state, uint32_t *write_msk)
{
    /*
     * Error status is RW1C, but as long as no bits are ever set it can
     * be handled as if it were RO.
     */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_STATUS, 0);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_STATUS, 0x1cfff);
    /* Bits 12-13 and 17-31 reserved in CXL 2.0 */
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_MASK, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(write_msk + R_CXL_RAS_UNC_ERR_SEVERITY, 0x1cfff);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_STATUS, 0);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_STATUS, 0x7f);
    stl_le_p(reg_state + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    stl_le_p(write_msk + R_CXL_RAS_COR_ERR_MASK, 0x7f);
    /*
     * CXL switches and devices must set the Multiple Header Recording
     * Capability bit.
     */
    stl_le_p(reg_state + R_CXL_RAS_ERR_CAP_CTRL, 0x200);
}

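/*
 * Set up the HDM decoder capability and per-decoder write masks. Decoder
 * register blocks are 0x20 bytes (eight 32-bit registers) apart, which is
 * what hdm_inc below accounts for.
 */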
static void hdm_init_common(uint32_t *reg_state, uint32_t *write_msk,
                            enum reg_type type)
{
    int decoder_count = 1;
    /* Stride between decoder register blocks, in 32-bit registers */
    const int hdm_inc = 0x20 / sizeof(uint32_t);
    int i;

    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, DECODER_COUNT,
                     cxl_decoder_count_enc(decoder_count));
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, TARGET_COUNT, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_256B, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, INTERLEAVE_4K, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_CAPABILITY, POISON_ON_ERR_CAP, 0);
    ARRAY_FIELD_DP32(reg_state, CXL_HDM_DECODER_GLOBAL_CONTROL,
                     HDM_DECODER_ENABLE, 0);
    write_msk[R_CXL_HDM_DECODER_GLOBAL_CONTROL] = 0x3;
    for (i = 0; i < decoder_count; i++) {
        write_msk[R_CXL_HDM_DECODER0_BASE_LO + i * hdm_inc] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_BASE_HI + i * hdm_inc] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_SIZE_LO + i * hdm_inc] = 0xf0000000;
        write_msk[R_CXL_HDM_DECODER0_SIZE_HI + i * hdm_inc] = 0xffffffff;
        write_msk[R_CXL_HDM_DECODER0_CTRL + i * hdm_inc] = 0x13ff;
        if (type == CXL2_DEVICE ||
            type == CXL2_TYPE3_DEVICE ||
            type == CXL2_LOGICAL_DEVICE) {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * hdm_inc] = 0xf0000000;
        } else {
            write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_LO + i * hdm_inc] = 0xffffffff;
        }
        write_msk[R_CXL_HDM_DECODER0_TARGET_LIST_HI + i * hdm_inc] = 0xffffffff;
    }
}

void cxl_component_register_init_common(uint32_t *reg_state, uint32_t *write_msk,
                                        enum reg_type type)
{
    int caps = 0;

    /*
     * In CXL 2.0 the capabilities required for each CXL component are such
     * that, with the ordering chosen here, a single number can be used to
     * define which capabilities should be provided.
     */
    switch (type) {
    case CXL2_DOWNSTREAM_PORT:
    case CXL2_DEVICE:
        /* RAS, Link */
        caps = 2;
        break;
    case CXL2_UPSTREAM_PORT:
    case CXL2_TYPE3_DEVICE:
    case CXL2_LOGICAL_DEVICE:
        /* + HDM */
        caps = 3;
        break;
    case CXL2_ROOT_PORT:
        /* + Extended Security, + Snoop */
        caps = 5;
        break;
    default:
        abort();
    }

    memset(reg_state, 0, CXL2_COMPONENT_CM_REGION_SIZE);

    /* CXL Capability Header Register */
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ID, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, CACHE_MEM_VERSION, 1);
    ARRAY_FIELD_DP32(reg_state, CXL_CAPABILITY_HEADER, ARRAY_SIZE, caps);

#define init_cap_reg(reg, id, version)                                        \
    QEMU_BUILD_BUG_ON(CXL_##reg##_REGISTERS_OFFSET == 0);                     \
    do {                                                                      \
        int which = R_CXL_##reg##_CAPABILITY_HEADER;                          \
        reg_state[which] = FIELD_DP32(reg_state[which],                       \
                                      CXL_##reg##_CAPABILITY_HEADER, ID, id); \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER,       \
                       VERSION, version);                                     \
        reg_state[which] =                                                    \
            FIELD_DP32(reg_state[which], CXL_##reg##_CAPABILITY_HEADER, PTR,  \
                       CXL_##reg##_REGISTERS_OFFSET);                         \
    } while (0)

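    /*
     * Each use of init_cap_reg() fills in one capability header entry; for
     * example init_cap_reg(RAS, 2, 2) records ID 2, version 2 and a pointer
     * to CXL_RAS_REGISTERS_OFFSET in the RAS capability header slot.
     */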
    init_cap_reg(RAS, 2, 2);
    ras_init_common(reg_state, write_msk);

    init_cap_reg(LINK, 4, 2);

    if (caps < 3) {
        return;
    }

    init_cap_reg(HDM, 5, 1);
    hdm_init_common(reg_state, write_msk, type);

    if (caps < 5) {
        return;
    }

    init_cap_reg(EXTSEC, 6, 1);
    init_cap_reg(SNOOP, 8, 1);

#undef init_cap_reg
}

/*
 * Helper to create a DVSEC header for a CXL entity. The caller is responsible
 * for tracking the valid offset.
 *
 * This function will build the DVSEC header on behalf of the caller and then
 * copy in the remaining data for the vendor specific bits.
 * It will also set up appropriate write masks.
 */
void cxl_component_create_dvsec(CXLComponentState *cxl,
                                enum reg_type cxl_dev_type, uint16_t length,
                                uint16_t type, uint8_t rev, uint8_t *body)
{
    PCIDevice *pdev = cxl->pdev;
    uint16_t offset = cxl->dvsec_offset;
    uint8_t *wmask = pdev->wmask;

    assert(offset >= PCI_CFG_SPACE_SIZE &&
           ((offset + length) < PCI_CFG_SPACE_EXP_SIZE));
    assert((length & 0xf000) == 0);
    assert((rev & ~0xf) == 0);

    /* Create the DVSEC in the MCFG space */
    pcie_add_capability(pdev, PCI_EXT_CAP_ID_DVSEC, 1, offset, length);
    pci_set_long(pdev->config + offset + PCIE_DVSEC_HEADER1_OFFSET,
                 (length << 20) | (rev << 16) | CXL_VENDOR_ID);
    pci_set_word(pdev->config + offset + PCIE_DVSEC_ID_OFFSET, type);
    memcpy(pdev->config + offset + sizeof(DVSECHeader),
           body + sizeof(DVSECHeader),
           length - sizeof(DVSECHeader));

    /*
     * Configure write masks. In QEMU's PCI config space model, a set bit in
     * pdev->wmask marks the corresponding config space bit as guest writable.
     */
    switch (type) {
    case PCIE_CXL_DEVICE_DVSEC:
        /* Control is RW Lock, so needs explicit blocking when lock is set */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl)] = 0xFD;
        wmask[offset + offsetof(CXLDVSECDevice, ctrl) + 1] = 0x4F;
        /* Status is RW1CS */
        wmask[offset + offsetof(CXLDVSECDevice, ctrl2)] = 0x0F;
        /* Lock is RW Once */
        wmask[offset + offsetof(CXLDVSECDevice, lock)] = 0x01;
        /* range1/2_base_high/low is RW Lock */
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range1_base_lo) + 3] = 0xF0;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_hi) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDevice, range2_base_lo) + 3] = 0xF0;
        break;
    case NON_CXL_FUNCTION_MAP_DVSEC:
        break; /* Not yet implemented */
    case EXTENSIONS_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortExtensions, control)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortExtensions, control) + 1] = 0x40;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_base)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_bus_limit)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_memory_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit)] = 0xF0;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_base_high) + 3] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECPortExtensions, alt_prefetch_limit_high) + 3] = 0xFF;
        break;
    case GPF_PORT_DVSEC:
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase1_ctrl) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECPortGPF, phase2_ctrl) + 1] = 0x0F;
        break;
    case GPF_DEVICE_DVSEC:
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration)] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_duration) + 1] = 0x0F;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power)] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 1] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 2] = 0xFF;
        wmask[offset + offsetof(CXLDVSECDeviceGPF, phase2_power) + 3] = 0xFF;
        break;
    case PCIE_FLEXBUS_PORT_DVSEC:
        switch (cxl_dev_type) {
        case CXL2_ROOT_PORT:
            /* No MLD */
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xbd;
            break;
        case CXL2_DOWNSTREAM_PORT:
            wmask[offset + offsetof(CXLDVSECPortFlexBus, ctrl)] = 0xfd;
            break;
        default: /* Registers are RO for other component types */
            break;
        }
        /* There are RW1CS bits in the status register, but none are set currently */
        break;
    }

    /* Update state for future DVSEC additions */
    range_init_nofail(&cxl->dvsecs[type], cxl->dvsec_offset, length);
    cxl->dvsec_offset += length;
}

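/*
 * Encodings for the interleave ways and interleave granularity fields used
 * in the HDM decoder registers, as defined by the CXL specification.
 */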
uint8_t cxl_interleave_ways_enc(int iw, Error **errp)
{
    switch (iw) {
    case 1: return 0x0;
    case 2: return 0x1;
    case 4: return 0x2;
    case 8: return 0x3;
    case 16: return 0x4;
    case 3: return 0x8;
    case 6: return 0x9;
    case 12: return 0xa;
    default:
        error_setg(errp, "Interleave ways: %d not supported", iw);
        return 0;
    }
}

uint8_t cxl_interleave_granularity_enc(uint64_t gran, Error **errp)
{
    switch (gran) {
    case 256: return 0;
    case 512: return 1;
    case 1024: return 2;
    case 2048: return 3;
    case 4096: return 4;
    case 8192: return 5;
    case 16384: return 6;
    default:
        error_setg(errp, "Interleave granularity: %" PRIu64 " invalid", gran);
        return 0;
    }
}