qemu/hw/arm/smmuv3.c
   1/*
   2 * Copyright (C) 2014-2016 Broadcom Corporation
   3 * Copyright (c) 2017 Red Hat, Inc.
   4 * Written by Prem Mallappa, Eric Auger
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License along
  16 * with this program; if not, see <http://www.gnu.org/licenses/>.
  17 */
  18
  19#include "qemu/osdep.h"
  20#include "qemu/bitops.h"
  21#include "hw/irq.h"
  22#include "hw/sysbus.h"
  23#include "migration/vmstate.h"
  24#include "hw/qdev-core.h"
  25#include "hw/pci/pci.h"
  26#include "exec/address-spaces.h"
  27#include "cpu.h"
  28#include "trace.h"
  29#include "qemu/log.h"
  30#include "qemu/error-report.h"
  31#include "qapi/error.h"
  32
  33#include "hw/arm/smmuv3.h"
  34#include "smmuv3-internal.h"
  35#include "smmu-internal.h"
  36
  37/**
  38 * smmuv3_trigger_irq - pulse @irq if enabled and update
  39 * GERROR register in case of GERROR interrupt
  40 *
  41 * @irq: irq type
  42 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
  43 */
  44static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
  45                               uint32_t gerror_mask)
  46{
  47
  48    bool pulse = false;
  49
  50    switch (irq) {
  51    case SMMU_IRQ_EVTQ:
  52        pulse = smmuv3_eventq_irq_enabled(s);
  53        break;
  54    case SMMU_IRQ_PRIQ:
  55        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
  56        break;
  57    case SMMU_IRQ_CMD_SYNC:
  58        pulse = true;
  59        break;
  60    case SMMU_IRQ_GERROR:
  61    {
  62        uint32_t pending = s->gerror ^ s->gerrorn;
  63        uint32_t new_gerrors = ~pending & gerror_mask;
  64
  65        if (!new_gerrors) {
  66            /* only toggle non pending errors */
  67            return;
  68        }
  69        s->gerror ^= new_gerrors;
  70        trace_smmuv3_write_gerror(new_gerrors, s->gerror);
  71
  72        pulse = smmuv3_gerror_irq_enabled(s);
  73        break;
  74    }
  75    }
  76    if (pulse) {
   77        trace_smmuv3_trigger_irq(irq);
   78        qemu_irq_pulse(s->irq[irq]);
  79    }
  80}
  81
  82static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
  83{
  84    uint32_t pending = s->gerror ^ s->gerrorn;
  85    uint32_t toggled = s->gerrorn ^ new_gerrorn;
  86
  87    if (toggled & ~pending) {
  88        qemu_log_mask(LOG_GUEST_ERROR,
  89                      "guest toggles non pending errors = 0x%x\n",
  90                      toggled & ~pending);
  91    }
  92
  93    /*
   94     * We do not raise any error in case the guest toggles bits
   95     * corresponding to inactive IRQs (CONSTRAINED UNPREDICTABLE)
  96     */
  97    s->gerrorn = new_gerrorn;
  98
  99    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
 100}
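
/*
 * Editor's note (illustrative walk-through, not part of the device model):
 * the GERROR/GERRORN pair implements a toggle/acknowledge protocol. Assuming
 * CMDQ_ERR is bit 0 and no other error is pending:
 *
 *   gerror = 0x0, gerrorn = 0x0                      -> nothing pending
 *   smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
 *       gerror ^= 0x1                                -> 0x1 vs 0x0: pending
 *   guest acknowledges by writing GERRORN = 0x1:
 *       smmuv3_write_gerrorn(s, 0x1)                 -> 0x1 vs 0x1: cleared
 *
 * Only bits that differ between GERROR and GERRORN are active, which is why
 * smmuv3_trigger_irq() refuses to toggle already pending bits.
 */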
 101
 102static inline MemTxResult queue_read(SMMUQueue *q, void *data)
 103{
 104    dma_addr_t addr = Q_CONS_ENTRY(q);
 105
 106    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
 107}
 108
 109static MemTxResult queue_write(SMMUQueue *q, void *data)
 110{
 111    dma_addr_t addr = Q_PROD_ENTRY(q);
 112    MemTxResult ret;
 113
 114    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
 115    if (ret != MEMTX_OK) {
 116        return ret;
 117    }
 118
 119    queue_prod_incr(q);
 120    return MEMTX_OK;
 121}
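
/*
 * Editor's note (illustrative): a PROD/CONS queue index is log2size bits of
 * entry index plus one wrap bit above it. Assuming, for illustration, a
 * queue with log2size = 2 (4 entries):
 *
 *   prod = 0b0_01  -> entry 1, wrap 0; Q_PROD_ENTRY() = base + 1 * entry_size
 *   cons = 0b1_01  -> entry 1, wrap 1
 *
 * The queue is empty when the index and wrap bits of prod and cons are all
 * equal, and full when the indexes match but the wrap bits differ, which is
 * what smmuv3_q_empty() and smmuv3_q_full() test.
 */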
 122
 123static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
 124{
 125    SMMUQueue *q = &s->eventq;
 126    MemTxResult r;
 127
 128    if (!smmuv3_eventq_enabled(s)) {
 129        return MEMTX_ERROR;
 130    }
 131
 132    if (smmuv3_q_full(q)) {
 133        return MEMTX_ERROR;
 134    }
 135
 136    r = queue_write(q, evt);
 137    if (r != MEMTX_OK) {
 138        return r;
 139    }
 140
 141    if (!smmuv3_q_empty(q)) {
 142        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
 143    }
 144    return MEMTX_OK;
 145}
 146
 147void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
 148{
 149    Evt evt = {};
 150    MemTxResult r;
 151
 152    if (!smmuv3_eventq_enabled(s)) {
 153        return;
 154    }
 155
 156    EVT_SET_TYPE(&evt, info->type);
 157    EVT_SET_SID(&evt, info->sid);
 158
 159    switch (info->type) {
 160    case SMMU_EVT_NONE:
 161        return;
 162    case SMMU_EVT_F_UUT:
 163        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
 164        EVT_SET_SSV(&evt,  info->u.f_uut.ssv);
 165        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
 166        EVT_SET_RNW(&evt,  info->u.f_uut.rnw);
 167        EVT_SET_PNU(&evt,  info->u.f_uut.pnu);
 168        EVT_SET_IND(&evt,  info->u.f_uut.ind);
 169        break;
 170    case SMMU_EVT_C_BAD_STREAMID:
 171        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
 172        EVT_SET_SSV(&evt,  info->u.c_bad_streamid.ssv);
 173        break;
 174    case SMMU_EVT_F_STE_FETCH:
 175        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
 176        EVT_SET_SSV(&evt,  info->u.f_ste_fetch.ssv);
 177        EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr);
 178        break;
 179    case SMMU_EVT_C_BAD_STE:
 180        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
 181        EVT_SET_SSV(&evt,  info->u.c_bad_ste.ssv);
 182        break;
 183    case SMMU_EVT_F_STREAM_DISABLED:
 184        break;
 185    case SMMU_EVT_F_TRANS_FORBIDDEN:
 186        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
 187        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
 188        break;
 189    case SMMU_EVT_C_BAD_SUBSTREAMID:
 190        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
 191        break;
 192    case SMMU_EVT_F_CD_FETCH:
 193        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
 194        EVT_SET_SSV(&evt,  info->u.f_cd_fetch.ssv);
 195        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
 196        break;
 197    case SMMU_EVT_C_BAD_CD:
 198        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
 199        EVT_SET_SSV(&evt,  info->u.c_bad_cd.ssv);
 200        break;
 201    case SMMU_EVT_F_WALK_EABT:
 202    case SMMU_EVT_F_TRANSLATION:
 203    case SMMU_EVT_F_ADDR_SIZE:
 204    case SMMU_EVT_F_ACCESS:
 205    case SMMU_EVT_F_PERMISSION:
 206        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
 207        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
 208        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
 209        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
 210        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
 211        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
 212        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
 213        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
 214        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
 215        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
 216        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
 217        break;
 218    case SMMU_EVT_F_CFG_CONFLICT:
 219        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
 220        EVT_SET_SSV(&evt,  info->u.f_cfg_conflict.ssv);
 221        break;
 222    /* rest is not implemented */
 223    case SMMU_EVT_F_BAD_ATS_TREQ:
 224    case SMMU_EVT_F_TLB_CONFLICT:
 225    case SMMU_EVT_E_PAGE_REQ:
 226    default:
 227        g_assert_not_reached();
 228    }
 229
 230    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
 231    r = smmuv3_write_eventq(s, &evt);
 232    if (r != MEMTX_OK) {
 233        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
 234    }
 235    info->recorded = true;
 236}
 237
 238static void smmuv3_init_regs(SMMUv3State *s)
 239{
 240    /**
 241     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
 242     *       multi-level stream table
 243     */
 244    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
 245    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
 246    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
 247    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
 248    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
 249    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
 250    /* terminated transaction will always be aborted/error returned */
 251    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
 252    /* 2-level stream table supported */
 253    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);
 254
 255    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
 256    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
 257    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS,   SMMU_CMDQS);
 258
 259    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
 260    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
 261
  262    /* 4K and 64K granule support */
 263    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
 264    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
 265    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */
 266
 267    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
 268    s->cmdq.prod = 0;
 269    s->cmdq.cons = 0;
 270    s->cmdq.entry_size = sizeof(struct Cmd);
 271    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
 272    s->eventq.prod = 0;
 273    s->eventq.cons = 0;
 274    s->eventq.entry_size = sizeof(struct Evt);
 275
 276    s->features = 0;
 277    s->sid_split = 0;
 278    s->aidr = 0x1;
 279}
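
/*
 * Editor's note (illustrative, assuming SMMU_CMDQS/SMMU_EVENTQS are the
 * maximum queue sizes defined in smmuv3-internal.h): IDR1 advertises these
 * maxima and the reset value of CMDQ_BASE/EVENTQ_BASE carries the same
 * LOG2SIZE in bits [4:0]. When the guest later programs a queue base, the
 * MMIO write handlers clamp the requested size, e.g.:
 *
 *   s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
 *   if (s->cmdq.log2size > SMMU_CMDQS) {
 *       s->cmdq.log2size = SMMU_CMDQS;
 *   }
 */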
 280
 281static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
 282                        SMMUEventInfo *event)
 283{
 284    int ret;
 285
 286    trace_smmuv3_get_ste(addr);
 287    /* TODO: guarantee 64-bit single-copy atomicity */
 288    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf));
 289    if (ret != MEMTX_OK) {
  290        qemu_log_mask(LOG_GUEST_ERROR,
  291                      "Cannot fetch STE at address=0x%"PRIx64"\n", addr);
 292        event->type = SMMU_EVT_F_STE_FETCH;
 293        event->u.f_ste_fetch.addr = addr;
 294        return -EINVAL;
 295    }
 296    return 0;
 297
 298}
 299
 300/* @ssid > 0 not supported yet */
 301static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
 302                       CD *buf, SMMUEventInfo *event)
 303{
 304    dma_addr_t addr = STE_CTXPTR(ste);
 305    int ret;
 306
 307    trace_smmuv3_get_cd(addr);
 308    /* TODO: guarantee 64-bit single-copy atomicity */
 309    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf));
 310    if (ret != MEMTX_OK) {
 311        qemu_log_mask(LOG_GUEST_ERROR,
  312                      "Cannot fetch CD at address=0x%"PRIx64"\n", addr);
  313        event->type = SMMU_EVT_F_CD_FETCH;
  314        event->u.f_cd_fetch.addr = addr;
 315        return -EINVAL;
 316    }
 317    return 0;
 318}
 319
 320/* Returns < 0 in case of invalid STE, 0 otherwise */
 321static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
 322                      STE *ste, SMMUEventInfo *event)
 323{
 324    uint32_t config;
 325
 326    if (!STE_VALID(ste)) {
 327        if (!event->inval_ste_allowed) {
 328            qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
 329        }
 330        goto bad_ste;
 331    }
 332
 333    config = STE_CONFIG(ste);
 334
 335    if (STE_CFG_ABORT(config)) {
 336        cfg->aborted = true;
 337        return 0;
 338    }
 339
 340    if (STE_CFG_BYPASS(config)) {
 341        cfg->bypassed = true;
 342        return 0;
 343    }
 344
 345    if (STE_CFG_S2_ENABLED(config)) {
 346        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
 347        goto bad_ste;
 348    }
 349
 350    if (STE_S1CDMAX(ste) != 0) {
 351        qemu_log_mask(LOG_UNIMP,
 352                      "SMMUv3 does not support multiple context descriptors yet\n");
 353        goto bad_ste;
 354    }
 355
 356    if (STE_S1STALLD(ste)) {
 357        qemu_log_mask(LOG_UNIMP,
 358                      "SMMUv3 S1 stalling fault model not allowed yet\n");
 359        goto bad_ste;
 360    }
 361    return 0;
 362
 363bad_ste:
 364    event->type = SMMU_EVT_C_BAD_STE;
 365    return -EINVAL;
 366}
 367
 368/**
  369 * smmu_find_ste - Return the stream table entry associated
  370 * with the sid
 371 *
 372 * @s: smmuv3 handle
 373 * @sid: stream ID
 374 * @ste: returned stream table entry
 375 * @event: handle to an event info
 376 *
  377 * Supports linear and 2-level stream tables
 378 * Return 0 on success, -EINVAL otherwise
 379 */
 380static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
 381                         SMMUEventInfo *event)
 382{
 383    dma_addr_t addr, strtab_base;
 384    uint32_t log2size;
 385    int strtab_size_shift;
 386    int ret;
 387
 388    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
 389    log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE);
 390    /*
 391     * Check SID range against both guest-configured and implementation limits
 392     */
 393    if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) {
 394        event->type = SMMU_EVT_C_BAD_STREAMID;
 395        return -EINVAL;
 396    }
 397    if (s->features & SMMU_FEATURE_2LVL_STE) {
 398        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
 399        dma_addr_t l1ptr, l2ptr;
 400        STEDesc l1std;
 401
 402        /*
 403         * Align strtab base address to table size. For this purpose, assume it
 404         * is not bounded by SMMU_IDR1_SIDSIZE.
 405         */
 406        strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3);
 407        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
 408                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
 409        l1_ste_offset = sid >> s->sid_split;
 410        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
 411        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
 412        /* TODO: guarantee 64-bit single-copy atomicity */
 413        ret = dma_memory_read(&address_space_memory, l1ptr, &l1std,
 414                              sizeof(l1std));
 415        if (ret != MEMTX_OK) {
 416            qemu_log_mask(LOG_GUEST_ERROR,
  417                          "Could not read L1PTR at 0x%"PRIx64"\n", l1ptr);
 418            event->type = SMMU_EVT_F_STE_FETCH;
 419            event->u.f_ste_fetch.addr = l1ptr;
 420            return -EINVAL;
 421        }
 422
 423        span = L1STD_SPAN(&l1std);
 424
 425        if (!span) {
 426            /* l2ptr is not valid */
 427            if (!event->inval_ste_allowed) {
 428                qemu_log_mask(LOG_GUEST_ERROR,
 429                              "invalid sid=%d (L1STD span=0)\n", sid);
 430            }
 431            event->type = SMMU_EVT_C_BAD_STREAMID;
 432            return -EINVAL;
 433        }
 434        max_l2_ste = (1 << span) - 1;
 435        l2ptr = l1std_l2ptr(&l1std);
 436        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
 437                                   l2ptr, l2_ste_offset, max_l2_ste);
 438        if (l2_ste_offset > max_l2_ste) {
 439            qemu_log_mask(LOG_GUEST_ERROR,
 440                          "l2_ste_offset=%d > max_l2_ste=%d\n",
 441                          l2_ste_offset, max_l2_ste);
 442            event->type = SMMU_EVT_C_BAD_STE;
 443            return -EINVAL;
 444        }
 445        addr = l2ptr + l2_ste_offset * sizeof(*ste);
 446    } else {
 447        strtab_size_shift = log2size + 5;
 448        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
 449                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
 450        addr = strtab_base + sid * sizeof(*ste);
 451    }
 452
 453    if (smmu_get_ste(s, addr, ste, event)) {
 454        return -EINVAL;
 455    }
 456
 457    return 0;
 458}
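
/*
 * Editor's illustration of the two-level lookup above, with assumed values
 * sid_split = 8 and sid = 0x1234:
 *
 *   l1_ste_offset = sid >> 8             = 0x12
 *   l2_ste_offset = sid & ((1 << 8) - 1) = 0x34
 *   l1ptr = strtab_base + 0x12 * sizeof(STEDesc)
 *   l2ptr = l1std_l2ptr(&l1std)           (read back from the L1 descriptor)
 *   addr  = l2ptr + 0x34 * sizeof(STE)
 *
 * In the linear case the STE is simply at strtab_base + sid * sizeof(STE).
 */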
 459
 460static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
 461{
 462    int ret = -EINVAL;
 463    int i;
 464
 465    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
 466        goto bad_cd;
 467    }
 468    if (!CD_A(cd)) {
 469        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
 470    }
 471    if (CD_S(cd)) {
 472        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
 473    }
 474    if (CD_HA(cd) || CD_HD(cd)) {
 475        goto bad_cd; /* HTTU = 0 */
 476    }
 477
  478    /* we only support AArch64 stage 1 translation at the moment */
 479    cfg->aa64 = true;
 480    cfg->stage = 1;
 481
 482    cfg->oas = oas2bits(CD_IPS(cd));
 483    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
 484    cfg->tbi = CD_TBI(cd);
 485    cfg->asid = CD_ASID(cd);
 486
 487    trace_smmuv3_decode_cd(cfg->oas);
 488
 489    /* decode data dependent on TT */
 490    for (i = 0; i <= 1; i++) {
 491        int tg, tsz;
 492        SMMUTransTableInfo *tt = &cfg->tt[i];
 493
 494        cfg->tt[i].disabled = CD_EPD(cd, i);
 495        if (cfg->tt[i].disabled) {
 496            continue;
 497        }
 498
 499        tsz = CD_TSZ(cd, i);
 500        if (tsz < 16 || tsz > 39) {
 501            goto bad_cd;
 502        }
 503
 504        tg = CD_TG(cd, i);
 505        tt->granule_sz = tg2granule(tg, i);
 506        if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
 507            goto bad_cd;
 508        }
 509
 510        tt->tsz = tsz;
 511        tt->ttb = CD_TTB(cd, i);
 512        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
 513            goto bad_cd;
 514        }
 515        tt->had = CD_HAD(cd, i);
 516        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had);
 517    }
 518
 519    event->record_trans_faults = CD_R(cd);
 520
 521    return 0;
 522
 523bad_cd:
 524    event->type = SMMU_EVT_C_BAD_CD;
 525    return ret;
 526}
 527
 528/**
 529 * smmuv3_decode_config - Prepare the translation configuration
 530 * for the @mr iommu region
 531 * @mr: iommu memory region the translation config must be prepared for
 532 * @cfg: output translation configuration which is populated through
 533 *       the different configuration decoding steps
 534 * @event: must be zero'ed by the caller
 535 *
  536 * Return < 0 in case of config decoding error (@event is filled
  537 * accordingly), 0 otherwise.
 538 */
 539static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
 540                                SMMUEventInfo *event)
 541{
 542    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
 543    uint32_t sid = smmu_get_sid(sdev);
 544    SMMUv3State *s = sdev->smmu;
 545    int ret;
 546    STE ste;
 547    CD cd;
 548
 549    ret = smmu_find_ste(s, sid, &ste, event);
 550    if (ret) {
 551        return ret;
 552    }
 553
 554    ret = decode_ste(s, cfg, &ste, event);
 555    if (ret) {
 556        return ret;
 557    }
 558
 559    if (cfg->aborted || cfg->bypassed) {
 560        return 0;
 561    }
 562
 563    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
 564    if (ret) {
 565        return ret;
 566    }
 567
 568    return decode_cd(cfg, &cd, event);
 569}
 570
 571/**
  572 * smmuv3_get_config - Look up a cached copy of the configuration data for
  573 * @sdev; on a cache miss, decode the configuration structures from
  574 * guest RAM.
 575 *
 576 * @sdev: SMMUDevice handle
 577 * @event: output event info
 578 *
 579 * The configuration cache contains data resulting from both STE and CD
  580 * decoding, in the form of an SMMUTransCfg struct. The hash table is indexed
 581 * by the SMMUDevice handle.
 582 */
 583static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
 584{
 585    SMMUv3State *s = sdev->smmu;
 586    SMMUState *bc = &s->smmu_state;
 587    SMMUTransCfg *cfg;
 588
 589    cfg = g_hash_table_lookup(bc->configs, sdev);
 590    if (cfg) {
 591        sdev->cfg_cache_hits++;
 592        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
 593                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
 594                            100 * sdev->cfg_cache_hits /
 595                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
 596    } else {
 597        sdev->cfg_cache_misses++;
 598        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
 599                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
 600                            100 * sdev->cfg_cache_hits /
 601                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
 602        cfg = g_new0(SMMUTransCfg, 1);
 603
 604        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
 605            g_hash_table_insert(bc->configs, sdev, cfg);
 606        } else {
 607            g_free(cfg);
 608            cfg = NULL;
 609        }
 610    }
 611    return cfg;
 612}
 613
 614static void smmuv3_flush_config(SMMUDevice *sdev)
 615{
 616    SMMUv3State *s = sdev->smmu;
 617    SMMUState *bc = &s->smmu_state;
 618
 619    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
 620    g_hash_table_remove(bc->configs, sdev);
 621}
 622
 623static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
 624                                      IOMMUAccessFlags flag, int iommu_idx)
 625{
 626    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
 627    SMMUv3State *s = sdev->smmu;
 628    uint32_t sid = smmu_get_sid(sdev);
 629    SMMUEventInfo event = {.type = SMMU_EVT_NONE,
 630                           .sid = sid,
 631                           .inval_ste_allowed = false};
 632    SMMUPTWEventInfo ptw_info = {};
 633    SMMUTranslationStatus status;
 634    SMMUState *bs = ARM_SMMU(s);
 635    uint64_t page_mask, aligned_addr;
 636    SMMUTLBEntry *cached_entry = NULL;
 637    SMMUTransTableInfo *tt;
 638    SMMUTransCfg *cfg = NULL;
 639    IOMMUTLBEntry entry = {
 640        .target_as = &address_space_memory,
 641        .iova = addr,
 642        .translated_addr = addr,
 643        .addr_mask = ~(hwaddr)0,
 644        .perm = IOMMU_NONE,
 645    };
 646
 647    qemu_mutex_lock(&s->mutex);
 648
 649    if (!smmu_enabled(s)) {
 650        status = SMMU_TRANS_DISABLE;
 651        goto epilogue;
 652    }
 653
 654    cfg = smmuv3_get_config(sdev, &event);
 655    if (!cfg) {
 656        status = SMMU_TRANS_ERROR;
 657        goto epilogue;
 658    }
 659
 660    if (cfg->aborted) {
 661        status = SMMU_TRANS_ABORT;
 662        goto epilogue;
 663    }
 664
 665    if (cfg->bypassed) {
 666        status = SMMU_TRANS_BYPASS;
 667        goto epilogue;
 668    }
 669
 670    tt = select_tt(cfg, addr);
 671    if (!tt) {
 672        if (event.record_trans_faults) {
 673            event.type = SMMU_EVT_F_TRANSLATION;
 674            event.u.f_translation.addr = addr;
 675            event.u.f_translation.rnw = flag & 0x1;
 676        }
 677        status = SMMU_TRANS_ERROR;
 678        goto epilogue;
 679    }
 680
 681    page_mask = (1ULL << (tt->granule_sz)) - 1;
 682    aligned_addr = addr & ~page_mask;
 683
 684    cached_entry = smmu_iotlb_lookup(bs, cfg, tt, aligned_addr);
 685    if (cached_entry) {
 686        if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
 687            status = SMMU_TRANS_ERROR;
 688            if (event.record_trans_faults) {
 689                event.type = SMMU_EVT_F_PERMISSION;
 690                event.u.f_permission.addr = addr;
 691                event.u.f_permission.rnw = flag & 0x1;
 692            }
 693        } else {
 694            status = SMMU_TRANS_SUCCESS;
 695        }
 696        goto epilogue;
 697    }
 698
 699    cached_entry = g_new0(SMMUTLBEntry, 1);
 700
 701    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
 702        g_free(cached_entry);
 703        switch (ptw_info.type) {
 704        case SMMU_PTW_ERR_WALK_EABT:
 705            event.type = SMMU_EVT_F_WALK_EABT;
 706            event.u.f_walk_eabt.addr = addr;
 707            event.u.f_walk_eabt.rnw = flag & 0x1;
 708            event.u.f_walk_eabt.class = 0x1;
 709            event.u.f_walk_eabt.addr2 = ptw_info.addr;
 710            break;
 711        case SMMU_PTW_ERR_TRANSLATION:
 712            if (event.record_trans_faults) {
 713                event.type = SMMU_EVT_F_TRANSLATION;
 714                event.u.f_translation.addr = addr;
 715                event.u.f_translation.rnw = flag & 0x1;
 716            }
 717            break;
 718        case SMMU_PTW_ERR_ADDR_SIZE:
 719            if (event.record_trans_faults) {
 720                event.type = SMMU_EVT_F_ADDR_SIZE;
 721                event.u.f_addr_size.addr = addr;
 722                event.u.f_addr_size.rnw = flag & 0x1;
 723            }
 724            break;
 725        case SMMU_PTW_ERR_ACCESS:
 726            if (event.record_trans_faults) {
 727                event.type = SMMU_EVT_F_ACCESS;
 728                event.u.f_access.addr = addr;
 729                event.u.f_access.rnw = flag & 0x1;
 730            }
 731            break;
 732        case SMMU_PTW_ERR_PERMISSION:
 733            if (event.record_trans_faults) {
 734                event.type = SMMU_EVT_F_PERMISSION;
 735                event.u.f_permission.addr = addr;
 736                event.u.f_permission.rnw = flag & 0x1;
 737            }
 738            break;
 739        default:
 740            g_assert_not_reached();
 741        }
 742        status = SMMU_TRANS_ERROR;
 743    } else {
 744        smmu_iotlb_insert(bs, cfg, cached_entry);
 745        status = SMMU_TRANS_SUCCESS;
 746    }
 747
 748epilogue:
 749    qemu_mutex_unlock(&s->mutex);
 750    switch (status) {
 751    case SMMU_TRANS_SUCCESS:
 752        entry.perm = flag;
 753        entry.translated_addr = cached_entry->entry.translated_addr +
 754                                    (addr & cached_entry->entry.addr_mask);
 755        entry.addr_mask = cached_entry->entry.addr_mask;
 756        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
 757                                       entry.translated_addr, entry.perm);
 758        break;
 759    case SMMU_TRANS_DISABLE:
 760        entry.perm = flag;
 761        entry.addr_mask = ~TARGET_PAGE_MASK;
 762        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
 763                                      entry.perm);
 764        break;
 765    case SMMU_TRANS_BYPASS:
 766        entry.perm = flag;
 767        entry.addr_mask = ~TARGET_PAGE_MASK;
 768        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
 769                                      entry.perm);
 770        break;
 771    case SMMU_TRANS_ABORT:
 772        /* no event is recorded on abort */
 773        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
 774                                     entry.perm);
 775        break;
 776    case SMMU_TRANS_ERROR:
 777        qemu_log_mask(LOG_GUEST_ERROR,
 778                      "%s translation failed for iova=0x%"PRIx64"(%s)\n",
 779                      mr->parent_obj.name, addr, smmu_event_string(event.type));
 780        smmuv3_record_event(s, &event);
 781        break;
 782    }
 783
 784    return entry;
 785}
 786
 787/**
 788 * smmuv3_notify_iova - call the notifier @n for a given
 789 * @asid and @iova tuple.
 790 *
  791 * @mr: IOMMU memory region handle
 792 * @n: notifier to be called
 793 * @asid: address space ID or negative value if we don't care
 794 * @iova: iova
 795 * @tg: translation granule (if communicated through range invalidation)
 796 * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
 797 */
 798static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
 799                               IOMMUNotifier *n,
 800                               int asid, dma_addr_t iova,
 801                               uint8_t tg, uint64_t num_pages)
 802{
 803    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
 804    IOMMUTLBEvent event;
 805    uint8_t granule;
 806
 807    if (!tg) {
 808        SMMUEventInfo event = {.inval_ste_allowed = true};
 809        SMMUTransCfg *cfg = smmuv3_get_config(sdev, &event);
 810        SMMUTransTableInfo *tt;
 811
 812        if (!cfg) {
 813            return;
 814        }
 815
 816        if (asid >= 0 && cfg->asid != asid) {
 817            return;
 818        }
 819
 820        tt = select_tt(cfg, iova);
 821        if (!tt) {
 822            return;
 823        }
 824        granule = tt->granule_sz;
 825    } else {
 826        granule = tg * 2 + 10;
 827    }
 828
 829    event.type = IOMMU_NOTIFIER_UNMAP;
 830    event.entry.target_as = &address_space_memory;
 831    event.entry.iova = iova;
 832    event.entry.addr_mask = num_pages * (1 << granule) - 1;
 833    event.entry.perm = IOMMU_NONE;
 834
 835    memory_region_notify_iommu_one(n, &event);
 836}
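
/*
 * Editor's illustration of the UNMAP notification built above, with assumed
 * values tg = 1 (4KB granule in the SMMUv3 TG encoding) and num_pages = 512:
 *
 *   granule               = tg * 2 + 10 = 12
 *   event.entry.addr_mask = 512 * (1 << 12) - 1 = 0x1fffff
 *
 * i.e. a single notifier call covers the whole 2MB range instead of 512
 * individual 4KB pages.
 */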
 837
 838/* invalidate an asid/iova range tuple in all mr's */
 839static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
 840                                      uint8_t tg, uint64_t num_pages)
 841{
 842    SMMUDevice *sdev;
 843
 844    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
 845        IOMMUMemoryRegion *mr = &sdev->iommu;
 846        IOMMUNotifier *n;
 847
 848        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova,
 849                                        tg, num_pages);
 850
 851        IOMMU_NOTIFIER_FOREACH(n, mr) {
 852            smmuv3_notify_iova(mr, n, asid, iova, tg, num_pages);
 853        }
 854    }
 855}
 856
 857static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
 858{
 859    uint8_t scale = 0, num = 0, ttl = 0;
 860    dma_addr_t addr = CMD_ADDR(cmd);
 861    uint8_t type = CMD_TYPE(cmd);
 862    uint16_t vmid = CMD_VMID(cmd);
 863    bool leaf = CMD_LEAF(cmd);
 864    uint8_t tg = CMD_TG(cmd);
 865    uint64_t first_page = 0, last_page;
 866    uint64_t num_pages = 1;
 867    int asid = -1;
 868
 869    if (tg) {
 870        scale = CMD_SCALE(cmd);
 871        num = CMD_NUM(cmd);
 872        ttl = CMD_TTL(cmd);
 873        num_pages = (num + 1) * BIT_ULL(scale);
 874    }
 875
 876    if (type == SMMU_CMD_TLBI_NH_VA) {
 877        asid = CMD_ASID(cmd);
 878    }
 879
  880    /* Split invalidations into power-of-2 range invalidations */
 881    last_page = num_pages - 1;
 882    while (num_pages) {
 883        uint8_t granule = tg * 2 + 10;
 884        uint64_t mask, count;
 885
 886        mask = dma_aligned_pow2_mask(first_page, last_page, 64 - granule);
 887        count = mask + 1;
 888
 889        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, count, ttl, leaf);
 890        smmuv3_inv_notifiers_iova(s, asid, addr, tg, count);
 891        smmu_iotlb_inv_iova(s, asid, addr, tg, count, ttl);
 892
 893        num_pages -= count;
 894        first_page += count;
 895        addr += count * BIT_ULL(granule);
 896    }
 897}
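
/*
 * Editor's illustration of the power-of-2 splitting above, assuming a range
 * invalidation of num_pages = 24 starting at first_page = 0 (last_page = 23):
 *
 *   1st iteration: dma_aligned_pow2_mask(0, 23, ...)  = 15 -> count = 16
 *   2nd iteration: dma_aligned_pow2_mask(16, 23, ...) = 7  -> count = 8
 *
 * so the 24-page range is emitted as one 16-page and one 8-page naturally
 * aligned invalidation, with addr advancing by count << granule each time.
 */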
 898
 899static gboolean
 900smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
 901{
 902    SMMUDevice *sdev = (SMMUDevice *)key;
 903    uint32_t sid = smmu_get_sid(sdev);
 904    SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;
 905
 906    if (sid < sid_range->start || sid > sid_range->end) {
 907        return false;
 908    }
 909    trace_smmuv3_config_cache_inv(sid);
 910    return true;
 911}
 912
 913static int smmuv3_cmdq_consume(SMMUv3State *s)
 914{
 915    SMMUState *bs = ARM_SMMU(s);
 916    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
 917    SMMUQueue *q = &s->cmdq;
 918    SMMUCommandType type = 0;
 919
 920    if (!smmuv3_cmdq_enabled(s)) {
 921        return 0;
 922    }
 923    /*
  924     * Some commands depend on register values, typically CR0. In case those
  925     * register values change while handling the command, the spec says it
  926     * is UNPREDICTABLE whether the command is interpreted under the new
  927     * or the old value.
 928     */
 929
 930    while (!smmuv3_q_empty(q)) {
 931        uint32_t pending = s->gerror ^ s->gerrorn;
 932        Cmd cmd;
 933
 934        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
 935                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));
 936
 937        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
 938            break;
 939        }
 940
 941        if (queue_read(q, &cmd) != MEMTX_OK) {
 942            cmd_error = SMMU_CERROR_ABT;
 943            break;
 944        }
 945
 946        type = CMD_TYPE(&cmd);
 947
 948        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));
 949
 950        qemu_mutex_lock(&s->mutex);
 951        switch (type) {
 952        case SMMU_CMD_SYNC:
 953            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
 954                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
 955            }
 956            break;
 957        case SMMU_CMD_PREFETCH_CONFIG:
 958        case SMMU_CMD_PREFETCH_ADDR:
 959            break;
 960        case SMMU_CMD_CFGI_STE:
 961        {
 962            uint32_t sid = CMD_SID(&cmd);
 963            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
 964            SMMUDevice *sdev;
 965
 966            if (CMD_SSEC(&cmd)) {
 967                cmd_error = SMMU_CERROR_ILL;
 968                break;
 969            }
 970
 971            if (!mr) {
 972                break;
 973            }
 974
 975            trace_smmuv3_cmdq_cfgi_ste(sid);
 976            sdev = container_of(mr, SMMUDevice, iommu);
 977            smmuv3_flush_config(sdev);
 978
 979            break;
 980        }
 981        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
 982        {
 983            uint32_t sid = CMD_SID(&cmd), mask;
 984            uint8_t range = CMD_STE_RANGE(&cmd);
 985            SMMUSIDRange sid_range;
 986
 987            if (CMD_SSEC(&cmd)) {
 988                cmd_error = SMMU_CERROR_ILL;
 989                break;
 990            }
 991
 992            mask = (1ULL << (range + 1)) - 1;
 993            sid_range.start = sid & ~mask;
 994            sid_range.end = sid_range.start + mask;
 995
 996            trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end);
 997            g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste,
 998                                        &sid_range);
 999            break;
1000        }
1001        case SMMU_CMD_CFGI_CD:
1002        case SMMU_CMD_CFGI_CD_ALL:
1003        {
1004            uint32_t sid = CMD_SID(&cmd);
1005            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
1006            SMMUDevice *sdev;
1007
1008            if (CMD_SSEC(&cmd)) {
1009                cmd_error = SMMU_CERROR_ILL;
1010                break;
1011            }
1012
1013            if (!mr) {
1014                break;
1015            }
1016
1017            trace_smmuv3_cmdq_cfgi_cd(sid);
1018            sdev = container_of(mr, SMMUDevice, iommu);
1019            smmuv3_flush_config(sdev);
1020            break;
1021        }
1022        case SMMU_CMD_TLBI_NH_ASID:
1023        {
1024            uint16_t asid = CMD_ASID(&cmd);
1025
1026            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
1027            smmu_inv_notifiers_all(&s->smmu_state);
1028            smmu_iotlb_inv_asid(bs, asid);
1029            break;
1030        }
1031        case SMMU_CMD_TLBI_NH_ALL:
1032        case SMMU_CMD_TLBI_NSNH_ALL:
1033            trace_smmuv3_cmdq_tlbi_nh();
1034            smmu_inv_notifiers_all(&s->smmu_state);
1035            smmu_iotlb_inv_all(bs);
1036            break;
1037        case SMMU_CMD_TLBI_NH_VAA:
1038        case SMMU_CMD_TLBI_NH_VA:
1039            smmuv3_s1_range_inval(bs, &cmd);
1040            break;
1041        case SMMU_CMD_TLBI_EL3_ALL:
1042        case SMMU_CMD_TLBI_EL3_VA:
1043        case SMMU_CMD_TLBI_EL2_ALL:
1044        case SMMU_CMD_TLBI_EL2_ASID:
1045        case SMMU_CMD_TLBI_EL2_VA:
1046        case SMMU_CMD_TLBI_EL2_VAA:
1047        case SMMU_CMD_TLBI_S12_VMALL:
1048        case SMMU_CMD_TLBI_S2_IPA:
1049        case SMMU_CMD_ATC_INV:
1050        case SMMU_CMD_PRI_RESP:
1051        case SMMU_CMD_RESUME:
1052        case SMMU_CMD_STALL_TERM:
1053            trace_smmuv3_unhandled_cmd(type);
1054            break;
1055        default:
1056            cmd_error = SMMU_CERROR_ILL;
1057            qemu_log_mask(LOG_GUEST_ERROR,
1058                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
1059            break;
1060        }
1061        qemu_mutex_unlock(&s->mutex);
1062        if (cmd_error) {
1063            break;
1064        }
1065        /*
1066         * We only increment the cons index after the completion of
1067         * the command. We do that because the SYNC returns immediately
1068         * and does not check the completion of previous commands
1069         */
1070        queue_cons_incr(q);
1071    }
1072
1073    if (cmd_error) {
1074        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
1075        smmu_write_cmdq_err(s, cmd_error);
1076        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
1077    }
1078
1079    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
1080                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));
1081
1082    return 0;
1083}
1084
1085static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
1086                               uint64_t data, MemTxAttrs attrs)
1087{
1088    switch (offset) {
1089    case A_GERROR_IRQ_CFG0:
1090        s->gerror_irq_cfg0 = data;
1091        return MEMTX_OK;
1092    case A_STRTAB_BASE:
1093        s->strtab_base = data;
1094        return MEMTX_OK;
1095    case A_CMDQ_BASE:
1096        s->cmdq.base = data;
1097        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
1098        if (s->cmdq.log2size > SMMU_CMDQS) {
1099            s->cmdq.log2size = SMMU_CMDQS;
1100        }
1101        return MEMTX_OK;
1102    case A_EVENTQ_BASE:
1103        s->eventq.base = data;
1104        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
1105        if (s->eventq.log2size > SMMU_EVENTQS) {
1106            s->eventq.log2size = SMMU_EVENTQS;
1107        }
1108        return MEMTX_OK;
1109    case A_EVENTQ_IRQ_CFG0:
1110        s->eventq_irq_cfg0 = data;
1111        return MEMTX_OK;
1112    default:
1113        qemu_log_mask(LOG_UNIMP,
1114                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
1115                      __func__, offset);
1116        return MEMTX_OK;
1117    }
1118}
1119
1120static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
1121                               uint64_t data, MemTxAttrs attrs)
1122{
1123    switch (offset) {
1124    case A_CR0:
1125        s->cr[0] = data;
1126        s->cr0ack = data & ~SMMU_CR0_RESERVED;
1127        /* in case the command queue has been enabled */
1128        smmuv3_cmdq_consume(s);
1129        return MEMTX_OK;
1130    case A_CR1:
1131        s->cr[1] = data;
1132        return MEMTX_OK;
1133    case A_CR2:
1134        s->cr[2] = data;
1135        return MEMTX_OK;
1136    case A_IRQ_CTRL:
1137        s->irq_ctrl = data;
1138        return MEMTX_OK;
1139    case A_GERRORN:
1140        smmuv3_write_gerrorn(s, data);
1141        /*
 1142         * By acknowledging the CMDQ_ERR, SW signals that commands can
 1143         * be processed again
1144         */
1145        smmuv3_cmdq_consume(s);
1146        return MEMTX_OK;
1147    case A_GERROR_IRQ_CFG0: /* 64b */
1148        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
1149        return MEMTX_OK;
1150    case A_GERROR_IRQ_CFG0 + 4:
1151        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
1152        return MEMTX_OK;
1153    case A_GERROR_IRQ_CFG1:
1154        s->gerror_irq_cfg1 = data;
1155        return MEMTX_OK;
1156    case A_GERROR_IRQ_CFG2:
1157        s->gerror_irq_cfg2 = data;
1158        return MEMTX_OK;
1159    case A_STRTAB_BASE: /* 64b */
1160        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
1161        return MEMTX_OK;
1162    case A_STRTAB_BASE + 4:
1163        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
1164        return MEMTX_OK;
1165    case A_STRTAB_BASE_CFG:
1166        s->strtab_base_cfg = data;
1167        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
1168            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
1169            s->features |= SMMU_FEATURE_2LVL_STE;
1170        }
1171        return MEMTX_OK;
1172    case A_CMDQ_BASE: /* 64b */
1173        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
1174        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
1175        if (s->cmdq.log2size > SMMU_CMDQS) {
1176            s->cmdq.log2size = SMMU_CMDQS;
1177        }
1178        return MEMTX_OK;
1179    case A_CMDQ_BASE + 4: /* 64b */
1180        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
1181        return MEMTX_OK;
1182    case A_CMDQ_PROD:
1183        s->cmdq.prod = data;
1184        smmuv3_cmdq_consume(s);
1185        return MEMTX_OK;
1186    case A_CMDQ_CONS:
1187        s->cmdq.cons = data;
1188        return MEMTX_OK;
1189    case A_EVENTQ_BASE: /* 64b */
1190        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
1191        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
1192        if (s->eventq.log2size > SMMU_EVENTQS) {
1193            s->eventq.log2size = SMMU_EVENTQS;
1194        }
1195        return MEMTX_OK;
1196    case A_EVENTQ_BASE + 4:
1197        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
1198        return MEMTX_OK;
1199    case A_EVENTQ_PROD:
1200        s->eventq.prod = data;
1201        return MEMTX_OK;
1202    case A_EVENTQ_CONS:
1203        s->eventq.cons = data;
1204        return MEMTX_OK;
1205    case A_EVENTQ_IRQ_CFG0: /* 64b */
1206        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
1207        return MEMTX_OK;
1208    case A_EVENTQ_IRQ_CFG0 + 4:
1209        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
1210        return MEMTX_OK;
1211    case A_EVENTQ_IRQ_CFG1:
1212        s->eventq_irq_cfg1 = data;
1213        return MEMTX_OK;
1214    case A_EVENTQ_IRQ_CFG2:
1215        s->eventq_irq_cfg2 = data;
1216        return MEMTX_OK;
1217    default:
1218        qemu_log_mask(LOG_UNIMP,
1219                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
1220                      __func__, offset);
1221        return MEMTX_OK;
1222    }
1223}
1224
1225static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
1226                                   unsigned size, MemTxAttrs attrs)
1227{
1228    SMMUState *sys = opaque;
1229    SMMUv3State *s = ARM_SMMUV3(sys);
1230    MemTxResult r;
1231
1232    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
1233    offset &= ~0x10000;
1234
1235    switch (size) {
1236    case 8:
1237        r = smmu_writell(s, offset, data, attrs);
1238        break;
1239    case 4:
1240        r = smmu_writel(s, offset, data, attrs);
1241        break;
1242    default:
1243        r = MEMTX_ERROR;
1244        break;
1245    }
1246
1247    trace_smmuv3_write_mmio(offset, data, size, r);
1248    return r;
1249}
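
/*
 * Editor's note on the page0/page1 folding above (illustrative; the 0x98
 * offset is assumed from the SMMUv3 register map, where it holds CMDQ_PROD):
 * since bit 16 is stripped, a 32-bit write to offset 0x10098 on page 1 is
 * dispatched exactly like a write to 0x098 on page 0.
 */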
1250
1251static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
1252                               uint64_t *data, MemTxAttrs attrs)
1253{
1254    switch (offset) {
1255    case A_GERROR_IRQ_CFG0:
1256        *data = s->gerror_irq_cfg0;
1257        return MEMTX_OK;
1258    case A_STRTAB_BASE:
1259        *data = s->strtab_base;
1260        return MEMTX_OK;
1261    case A_CMDQ_BASE:
1262        *data = s->cmdq.base;
1263        return MEMTX_OK;
1264    case A_EVENTQ_BASE:
1265        *data = s->eventq.base;
1266        return MEMTX_OK;
1267    default:
1268        *data = 0;
1269        qemu_log_mask(LOG_UNIMP,
1270                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
1271                      __func__, offset);
1272        return MEMTX_OK;
1273    }
1274}
1275
1276static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
1277                              uint64_t *data, MemTxAttrs attrs)
1278{
1279    switch (offset) {
1280    case A_IDREGS ... A_IDREGS + 0x2f:
1281        *data = smmuv3_idreg(offset - A_IDREGS);
1282        return MEMTX_OK;
1283    case A_IDR0 ... A_IDR5:
1284        *data = s->idr[(offset - A_IDR0) / 4];
1285        return MEMTX_OK;
1286    case A_IIDR:
1287        *data = s->iidr;
1288        return MEMTX_OK;
1289    case A_AIDR:
1290        *data = s->aidr;
1291        return MEMTX_OK;
1292    case A_CR0:
1293        *data = s->cr[0];
1294        return MEMTX_OK;
1295    case A_CR0ACK:
1296        *data = s->cr0ack;
1297        return MEMTX_OK;
1298    case A_CR1:
1299        *data = s->cr[1];
1300        return MEMTX_OK;
1301    case A_CR2:
1302        *data = s->cr[2];
1303        return MEMTX_OK;
1304    case A_STATUSR:
1305        *data = s->statusr;
1306        return MEMTX_OK;
1307    case A_IRQ_CTRL:
1308    case A_IRQ_CTRL_ACK:
1309        *data = s->irq_ctrl;
1310        return MEMTX_OK;
1311    case A_GERROR:
1312        *data = s->gerror;
1313        return MEMTX_OK;
1314    case A_GERRORN:
1315        *data = s->gerrorn;
1316        return MEMTX_OK;
1317    case A_GERROR_IRQ_CFG0: /* 64b */
1318        *data = extract64(s->gerror_irq_cfg0, 0, 32);
1319        return MEMTX_OK;
1320    case A_GERROR_IRQ_CFG0 + 4:
1321        *data = extract64(s->gerror_irq_cfg0, 32, 32);
1322        return MEMTX_OK;
1323    case A_GERROR_IRQ_CFG1:
1324        *data = s->gerror_irq_cfg1;
1325        return MEMTX_OK;
1326    case A_GERROR_IRQ_CFG2:
1327        *data = s->gerror_irq_cfg2;
1328        return MEMTX_OK;
1329    case A_STRTAB_BASE: /* 64b */
1330        *data = extract64(s->strtab_base, 0, 32);
1331        return MEMTX_OK;
1332    case A_STRTAB_BASE + 4: /* 64b */
1333        *data = extract64(s->strtab_base, 32, 32);
1334        return MEMTX_OK;
1335    case A_STRTAB_BASE_CFG:
1336        *data = s->strtab_base_cfg;
1337        return MEMTX_OK;
1338    case A_CMDQ_BASE: /* 64b */
1339        *data = extract64(s->cmdq.base, 0, 32);
1340        return MEMTX_OK;
1341    case A_CMDQ_BASE + 4:
1342        *data = extract64(s->cmdq.base, 32, 32);
1343        return MEMTX_OK;
1344    case A_CMDQ_PROD:
1345        *data = s->cmdq.prod;
1346        return MEMTX_OK;
1347    case A_CMDQ_CONS:
1348        *data = s->cmdq.cons;
1349        return MEMTX_OK;
1350    case A_EVENTQ_BASE: /* 64b */
1351        *data = extract64(s->eventq.base, 0, 32);
1352        return MEMTX_OK;
1353    case A_EVENTQ_BASE + 4: /* 64b */
1354        *data = extract64(s->eventq.base, 32, 32);
1355        return MEMTX_OK;
1356    case A_EVENTQ_PROD:
1357        *data = s->eventq.prod;
1358        return MEMTX_OK;
1359    case A_EVENTQ_CONS:
1360        *data = s->eventq.cons;
1361        return MEMTX_OK;
1362    default:
1363        *data = 0;
1364        qemu_log_mask(LOG_UNIMP,
1365                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
1366                      __func__, offset);
1367        return MEMTX_OK;
1368    }
1369}
1370
1371static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
1372                                  unsigned size, MemTxAttrs attrs)
1373{
1374    SMMUState *sys = opaque;
1375    SMMUv3State *s = ARM_SMMUV3(sys);
1376    MemTxResult r;
1377
1378    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
1379    offset &= ~0x10000;
1380
1381    switch (size) {
1382    case 8:
1383        r = smmu_readll(s, offset, data, attrs);
1384        break;
1385    case 4:
1386        r = smmu_readl(s, offset, data, attrs);
1387        break;
1388    default:
1389        r = MEMTX_ERROR;
1390        break;
1391    }
1392
1393    trace_smmuv3_read_mmio(offset, *data, size, r);
1394    return r;
1395}
1396
1397static const MemoryRegionOps smmu_mem_ops = {
1398    .read_with_attrs = smmu_read_mmio,
1399    .write_with_attrs = smmu_write_mmio,
1400    .endianness = DEVICE_LITTLE_ENDIAN,
1401    .valid = {
1402        .min_access_size = 4,
1403        .max_access_size = 8,
1404    },
1405    .impl = {
1406        .min_access_size = 4,
1407        .max_access_size = 8,
1408    },
1409};
1410
1411static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
1412{
1413    int i;
1414
1415    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
1416        sysbus_init_irq(dev, &s->irq[i]);
1417    }
1418}
1419
1420static void smmu_reset(DeviceState *dev)
1421{
1422    SMMUv3State *s = ARM_SMMUV3(dev);
1423    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
1424
1425    c->parent_reset(dev);
1426
1427    smmuv3_init_regs(s);
1428}
1429
1430static void smmu_realize(DeviceState *d, Error **errp)
1431{
1432    SMMUState *sys = ARM_SMMU(d);
1433    SMMUv3State *s = ARM_SMMUV3(sys);
1434    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
1435    SysBusDevice *dev = SYS_BUS_DEVICE(d);
1436    Error *local_err = NULL;
1437
1438    c->parent_realize(d, &local_err);
1439    if (local_err) {
1440        error_propagate(errp, local_err);
1441        return;
1442    }
1443
1444    qemu_mutex_init(&s->mutex);
1445
1446    memory_region_init_io(&sys->iomem, OBJECT(s),
1447                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);
1448
1449    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;
1450
1451    sysbus_init_mmio(dev, &sys->iomem);
1452
1453    smmu_init_irq(s, dev);
1454}
1455
1456static const VMStateDescription vmstate_smmuv3_queue = {
1457    .name = "smmuv3_queue",
1458    .version_id = 1,
1459    .minimum_version_id = 1,
1460    .fields = (VMStateField[]) {
1461        VMSTATE_UINT64(base, SMMUQueue),
1462        VMSTATE_UINT32(prod, SMMUQueue),
1463        VMSTATE_UINT32(cons, SMMUQueue),
1464        VMSTATE_UINT8(log2size, SMMUQueue),
1465        VMSTATE_END_OF_LIST(),
1466    },
1467};
1468
1469static const VMStateDescription vmstate_smmuv3 = {
1470    .name = "smmuv3",
1471    .version_id = 1,
1472    .minimum_version_id = 1,
1473    .priority = MIG_PRI_IOMMU,
1474    .fields = (VMStateField[]) {
1475        VMSTATE_UINT32(features, SMMUv3State),
1476        VMSTATE_UINT8(sid_size, SMMUv3State),
1477        VMSTATE_UINT8(sid_split, SMMUv3State),
1478
1479        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
1480        VMSTATE_UINT32(cr0ack, SMMUv3State),
1481        VMSTATE_UINT32(statusr, SMMUv3State),
1482        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
1483        VMSTATE_UINT32(gerror, SMMUv3State),
1484        VMSTATE_UINT32(gerrorn, SMMUv3State),
1485        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
1486        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
1487        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
1488        VMSTATE_UINT64(strtab_base, SMMUv3State),
1489        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
1490        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
1491        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
1492        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),
1493
1494        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
1495        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
1496
1497        VMSTATE_END_OF_LIST(),
1498    },
1499};
1500
1501static void smmuv3_instance_init(Object *obj)
1502{
1503    /* Nothing much to do here as of now */
1504}
1505
1506static void smmuv3_class_init(ObjectClass *klass, void *data)
1507{
1508    DeviceClass *dc = DEVICE_CLASS(klass);
1509    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);
1510
1511    dc->vmsd = &vmstate_smmuv3;
1512    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
1513    c->parent_realize = dc->realize;
1514    dc->realize = smmu_realize;
1515}
1516
1517static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
1518                                      IOMMUNotifierFlag old,
1519                                      IOMMUNotifierFlag new,
1520                                      Error **errp)
1521{
1522    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
1523    SMMUv3State *s3 = sdev->smmu;
1524    SMMUState *s = &(s3->smmu_state);
1525
1526    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
1527        error_setg(errp, "SMMUv3 does not support dev-iotlb yet");
1528        return -EINVAL;
1529    }
1530
1531    if (new & IOMMU_NOTIFIER_MAP) {
1532        error_setg(errp,
1533                   "device %02x.%02x.%x requires iommu MAP notifier which is "
1534                   "not currently supported", pci_bus_num(sdev->bus),
1535                   PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
1536        return -EINVAL;
1537    }
1538
1539    if (old == IOMMU_NOTIFIER_NONE) {
1540        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
1541        QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
1542    } else if (new == IOMMU_NOTIFIER_NONE) {
1543        trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
1544        QLIST_REMOVE(sdev, next);
1545    }
1546    return 0;
1547}
1548
1549static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
1550                                                  void *data)
1551{
1552    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
1553
1554    imrc->translate = smmuv3_translate;
1555    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
1556}
1557
1558static const TypeInfo smmuv3_type_info = {
1559    .name          = TYPE_ARM_SMMUV3,
1560    .parent        = TYPE_ARM_SMMU,
1561    .instance_size = sizeof(SMMUv3State),
1562    .instance_init = smmuv3_instance_init,
1563    .class_size    = sizeof(SMMUv3Class),
1564    .class_init    = smmuv3_class_init,
1565};
1566
1567static const TypeInfo smmuv3_iommu_memory_region_info = {
1568    .parent = TYPE_IOMMU_MEMORY_REGION,
1569    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
1570    .class_init = smmuv3_iommu_memory_region_class_init,
1571};
1572
1573static void smmuv3_register_types(void)
1574{
1575    type_register(&smmuv3_type_info);
1576    type_register(&smmuv3_iommu_memory_region_info);
1577}
1578
1579type_init(smmuv3_register_types)
1580
1581