qemu/hw/arm/smmuv3.c
   1/*
   2 * Copyright (C) 2014-2016 Broadcom Corporation
   3 * Copyright (c) 2017 Red Hat, Inc.
   4 * Written by Prem Mallappa, Eric Auger
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License along
  16 * with this program; if not, see <http://www.gnu.org/licenses/>.
  17 */
  18
  19#include "qemu/osdep.h"
  20#include "qemu/bitops.h"
  21#include "hw/irq.h"
  22#include "hw/sysbus.h"
  23#include "migration/vmstate.h"
  24#include "hw/qdev-core.h"
  25#include "hw/pci/pci.h"
  26#include "cpu.h"
  27#include "trace.h"
  28#include "qemu/log.h"
  29#include "qemu/error-report.h"
  30#include "qapi/error.h"
  31
  32#include "hw/arm/smmuv3.h"
  33#include "smmuv3-internal.h"
  34#include "smmu-internal.h"
  35
  36/**
  37 * smmuv3_trigger_irq - pulse @irq if enabled and update
  38 * GERROR register in case of GERROR interrupt
  39 *
  40 * @irq: irq type
  41 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
  42 */
  43static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
  44                               uint32_t gerror_mask)
  45{
  46
  47    bool pulse = false;
  48
  49    switch (irq) {
  50    case SMMU_IRQ_EVTQ:
  51        pulse = smmuv3_eventq_irq_enabled(s);
  52        break;
  53    case SMMU_IRQ_PRIQ:
  54        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
  55        break;
  56    case SMMU_IRQ_CMD_SYNC:
  57        pulse = true;
  58        break;
  59    case SMMU_IRQ_GERROR:
  60    {
  61        uint32_t pending = s->gerror ^ s->gerrorn;
  62        uint32_t new_gerrors = ~pending & gerror_mask;
  63
  64        if (!new_gerrors) {
   65            /* only toggle non-pending errors */
  66            return;
  67        }
  68        s->gerror ^= new_gerrors;
  69        trace_smmuv3_write_gerror(new_gerrors, s->gerror);
  70
  71        pulse = smmuv3_gerror_irq_enabled(s);
  72        break;
  73    }
  74    }
  75    if (pulse) {
   76        trace_smmuv3_trigger_irq(irq);
   77        qemu_irq_pulse(s->irq[irq]);
  78    }
  79}
  80
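/*
 * smmuv3_write_gerrorn - handle a guest write to the GERRORN register,
 * which acknowledges pending global errors by toggling the matching bits.
 */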
  81static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
  82{
  83    uint32_t pending = s->gerror ^ s->gerrorn;
  84    uint32_t toggled = s->gerrorn ^ new_gerrorn;
  85
  86    if (toggled & ~pending) {
  87        qemu_log_mask(LOG_GUEST_ERROR,
   88                      "guest toggles non-pending errors = 0x%x\n",
  89                      toggled & ~pending);
  90    }
  91
  92    /*
   93     * We do not raise any error when the guest toggles bits corresponding
   94     * to inactive IRQs (CONSTRAINED UNPREDICTABLE)
  95     */
  96    s->gerrorn = new_gerrorn;
  97
  98    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
  99}
 100
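/* Read the queue entry at the current consumer index */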
 101static inline MemTxResult queue_read(SMMUQueue *q, void *data)
 102{
 103    dma_addr_t addr = Q_CONS_ENTRY(q);
 104
 105    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
 106}
 107
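/* Write one entry at the producer index and advance the producer pointer */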
 108static MemTxResult queue_write(SMMUQueue *q, void *data)
 109{
 110    dma_addr_t addr = Q_PROD_ENTRY(q);
 111    MemTxResult ret;
 112
 113    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
 114    if (ret != MEMTX_OK) {
 115        return ret;
 116    }
 117
 118    queue_prod_incr(q);
 119    return MEMTX_OK;
 120}
 121
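/*
 * Push one event record onto the event queue and, if the queue is now
 * non-empty, trigger the EVTQ interrupt. Fails with MEMTX_ERROR when the
 * queue is disabled or full.
 */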
 122static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
 123{
 124    SMMUQueue *q = &s->eventq;
 125    MemTxResult r;
 126
 127    if (!smmuv3_eventq_enabled(s)) {
 128        return MEMTX_ERROR;
 129    }
 130
 131    if (smmuv3_q_full(q)) {
 132        return MEMTX_ERROR;
 133    }
 134
 135    r = queue_write(q, evt);
 136    if (r != MEMTX_OK) {
 137        return r;
 138    }
 139
 140    if (!smmuv3_q_empty(q)) {
 141        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
 142    }
 143    return MEMTX_OK;
 144}
 145
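/*
 * smmuv3_record_event - build an event record from @info and push it onto
 * the event queue; on a queue write failure the EVENTQ_ABT_ERR global
 * error is raised instead.
 */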
 146void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
 147{
 148    Evt evt = {};
 149    MemTxResult r;
 150
 151    if (!smmuv3_eventq_enabled(s)) {
 152        return;
 153    }
 154
 155    EVT_SET_TYPE(&evt, info->type);
 156    EVT_SET_SID(&evt, info->sid);
 157
 158    switch (info->type) {
 159    case SMMU_EVT_NONE:
 160        return;
 161    case SMMU_EVT_F_UUT:
 162        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
 163        EVT_SET_SSV(&evt,  info->u.f_uut.ssv);
 164        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
 165        EVT_SET_RNW(&evt,  info->u.f_uut.rnw);
 166        EVT_SET_PNU(&evt,  info->u.f_uut.pnu);
 167        EVT_SET_IND(&evt,  info->u.f_uut.ind);
 168        break;
 169    case SMMU_EVT_C_BAD_STREAMID:
 170        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
 171        EVT_SET_SSV(&evt,  info->u.c_bad_streamid.ssv);
 172        break;
 173    case SMMU_EVT_F_STE_FETCH:
 174        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
 175        EVT_SET_SSV(&evt,  info->u.f_ste_fetch.ssv);
 176        EVT_SET_ADDR2(&evt, info->u.f_ste_fetch.addr);
 177        break;
 178    case SMMU_EVT_C_BAD_STE:
 179        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
 180        EVT_SET_SSV(&evt,  info->u.c_bad_ste.ssv);
 181        break;
 182    case SMMU_EVT_F_STREAM_DISABLED:
 183        break;
 184    case SMMU_EVT_F_TRANS_FORBIDDEN:
 185        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
 186        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
 187        break;
 188    case SMMU_EVT_C_BAD_SUBSTREAMID:
 189        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
 190        break;
 191    case SMMU_EVT_F_CD_FETCH:
 192        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
 193        EVT_SET_SSV(&evt,  info->u.f_cd_fetch.ssv);
 194        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
 195        break;
 196    case SMMU_EVT_C_BAD_CD:
 197        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
 198        EVT_SET_SSV(&evt,  info->u.c_bad_cd.ssv);
 199        break;
 200    case SMMU_EVT_F_WALK_EABT:
 201    case SMMU_EVT_F_TRANSLATION:
 202    case SMMU_EVT_F_ADDR_SIZE:
 203    case SMMU_EVT_F_ACCESS:
 204    case SMMU_EVT_F_PERMISSION:
 205        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
 206        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
 207        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
 208        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
 209        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
 210        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
 211        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
 212        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
 213        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
 214        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
 215        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
 216        break;
 217    case SMMU_EVT_F_CFG_CONFLICT:
 218        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
 219        EVT_SET_SSV(&evt,  info->u.f_cfg_conflict.ssv);
 220        break;
 221    /* rest is not implemented */
 222    case SMMU_EVT_F_BAD_ATS_TREQ:
 223    case SMMU_EVT_F_TLB_CONFLICT:
 224    case SMMU_EVT_E_PAGE_REQ:
 225    default:
 226        g_assert_not_reached();
 227    }
 228
 229    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
 230    r = smmuv3_write_eventq(s, &evt);
 231    if (r != MEMTX_OK) {
 232        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
 233    }
 234    info->recorded = true;
 235}
 236
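/* Set the reset values of the ID registers and of the command/event queues */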
 237static void smmuv3_init_regs(SMMUv3State *s)
 238{
 239    /**
 240     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
 241     *       multi-level stream table
 242     */
 243    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
 244    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
 245    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
 246    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
 247    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
 248    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
  249    /* terminated transactions will always be aborted / return an error */
 250    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
 251    /* 2-level stream table supported */
 252    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);
 253
 254    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
 255    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
 256    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS,   SMMU_CMDQS);
 257
 258    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, RIL, 1);
 259    s->idr[3] = FIELD_DP32(s->idr[3], IDR3, HAD, 1);
 260
 261    /* 4K, 16K and 64K granule support */
 262    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
 263    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN16K, 1);
 264    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
 265    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */
 266
 267    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
 268    s->cmdq.prod = 0;
 269    s->cmdq.cons = 0;
 270    s->cmdq.entry_size = sizeof(struct Cmd);
 271    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
 272    s->eventq.prod = 0;
 273    s->eventq.cons = 0;
 274    s->eventq.entry_size = sizeof(struct Evt);
 275
 276    s->features = 0;
 277    s->sid_split = 0;
 278    s->aidr = 0x1;
 279}
 280
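/* Fetch the stream table entry located at @addr in guest memory */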
 281static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
 282                        SMMUEventInfo *event)
 283{
 284    int ret;
 285
 286    trace_smmuv3_get_ste(addr);
 287    /* TODO: guarantee 64-bit single-copy atomicity */
 288    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf));
 289    if (ret != MEMTX_OK) {
 290        qemu_log_mask(LOG_GUEST_ERROR,
  291                      "Cannot fetch STE at address=0x%"PRIx64"\n", addr);
 292        event->type = SMMU_EVT_F_STE_FETCH;
 293        event->u.f_ste_fetch.addr = addr;
 294        return -EINVAL;
 295    }
 296    return 0;
 297
 298}
 299
 300/* @ssid > 0 not supported yet */
 301static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
 302                       CD *buf, SMMUEventInfo *event)
 303{
 304    dma_addr_t addr = STE_CTXPTR(ste);
 305    int ret;
 306
 307    trace_smmuv3_get_cd(addr);
 308    /* TODO: guarantee 64-bit single-copy atomicity */
 309    ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf));
 310    if (ret != MEMTX_OK) {
 311        qemu_log_mask(LOG_GUEST_ERROR,
  312                      "Cannot fetch CD at address=0x%"PRIx64"\n", addr);
 313        event->type = SMMU_EVT_F_CD_FETCH;
  314        event->u.f_cd_fetch.addr = addr;
 315        return -EINVAL;
 316    }
 317    return 0;
 318}
 319
 320/* Returns < 0 in case of invalid STE, 0 otherwise */
 321static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
 322                      STE *ste, SMMUEventInfo *event)
 323{
 324    uint32_t config;
 325
 326    if (!STE_VALID(ste)) {
 327        if (!event->inval_ste_allowed) {
 328            qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n");
 329        }
 330        goto bad_ste;
 331    }
 332
 333    config = STE_CONFIG(ste);
 334
 335    if (STE_CFG_ABORT(config)) {
 336        cfg->aborted = true;
 337        return 0;
 338    }
 339
 340    if (STE_CFG_BYPASS(config)) {
 341        cfg->bypassed = true;
 342        return 0;
 343    }
 344
 345    if (STE_CFG_S2_ENABLED(config)) {
 346        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
 347        goto bad_ste;
 348    }
 349
 350    if (STE_S1CDMAX(ste) != 0) {
 351        qemu_log_mask(LOG_UNIMP,
 352                      "SMMUv3 does not support multiple context descriptors yet\n");
 353        goto bad_ste;
 354    }
 355
 356    if (STE_S1STALLD(ste)) {
 357        qemu_log_mask(LOG_UNIMP,
 358                      "SMMUv3 S1 stalling fault model not allowed yet\n");
 359        goto bad_ste;
 360    }
 361    return 0;
 362
 363bad_ste:
 364    event->type = SMMU_EVT_C_BAD_STE;
 365    return -EINVAL;
 366}
 367
 368/**
 369 * smmu_find_ste - Return the stream table entry associated
  370 * with the sid
 371 *
 372 * @s: smmuv3 handle
 373 * @sid: stream ID
 374 * @ste: returned stream table entry
 375 * @event: handle to an event info
 376 *
  377 * Supports linear and 2-level stream tables
 378 * Return 0 on success, -EINVAL otherwise
 379 */
 380static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
 381                         SMMUEventInfo *event)
 382{
 383    dma_addr_t addr, strtab_base;
 384    uint32_t log2size;
 385    int strtab_size_shift;
 386    int ret;
 387
 388    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
 389    log2size = FIELD_EX32(s->strtab_base_cfg, STRTAB_BASE_CFG, LOG2SIZE);
 390    /*
 391     * Check SID range against both guest-configured and implementation limits
 392     */
 393    if (sid >= (1 << MIN(log2size, SMMU_IDR1_SIDSIZE))) {
 394        event->type = SMMU_EVT_C_BAD_STREAMID;
 395        return -EINVAL;
 396    }
 397    if (s->features & SMMU_FEATURE_2LVL_STE) {
 398        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
 399        dma_addr_t l1ptr, l2ptr;
 400        STEDesc l1std;
 401
 402        /*
 403         * Align strtab base address to table size. For this purpose, assume it
 404         * is not bounded by SMMU_IDR1_SIDSIZE.
 405         */
 406        strtab_size_shift = MAX(5, (int)log2size - s->sid_split - 1 + 3);
 407        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
 408                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
 409        l1_ste_offset = sid >> s->sid_split;
 410        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
 411        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
 412        /* TODO: guarantee 64-bit single-copy atomicity */
 413        ret = dma_memory_read(&address_space_memory, l1ptr, &l1std,
 414                              sizeof(l1std));
 415        if (ret != MEMTX_OK) {
 416            qemu_log_mask(LOG_GUEST_ERROR,
  417                          "Could not read L1PTR at 0x%"PRIx64"\n", l1ptr);
 418            event->type = SMMU_EVT_F_STE_FETCH;
 419            event->u.f_ste_fetch.addr = l1ptr;
 420            return -EINVAL;
 421        }
 422
 423        span = L1STD_SPAN(&l1std);
 424
 425        if (!span) {
 426            /* l2ptr is not valid */
 427            if (!event->inval_ste_allowed) {
 428                qemu_log_mask(LOG_GUEST_ERROR,
 429                              "invalid sid=%d (L1STD span=0)\n", sid);
 430            }
 431            event->type = SMMU_EVT_C_BAD_STREAMID;
 432            return -EINVAL;
 433        }
 434        max_l2_ste = (1 << span) - 1;
 435        l2ptr = l1std_l2ptr(&l1std);
 436        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
 437                                   l2ptr, l2_ste_offset, max_l2_ste);
 438        if (l2_ste_offset > max_l2_ste) {
 439            qemu_log_mask(LOG_GUEST_ERROR,
 440                          "l2_ste_offset=%d > max_l2_ste=%d\n",
 441                          l2_ste_offset, max_l2_ste);
 442            event->type = SMMU_EVT_C_BAD_STE;
 443            return -EINVAL;
 444        }
 445        addr = l2ptr + l2_ste_offset * sizeof(*ste);
 446    } else {
 447        strtab_size_shift = log2size + 5;
 448        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK &
 449                      ~MAKE_64BIT_MASK(0, strtab_size_shift);
 450        addr = strtab_base + sid * sizeof(*ste);
 451    }
 452
 453    if (smmu_get_ste(s, addr, ste, event)) {
 454        return -EINVAL;
 455    }
 456
 457    return 0;
 458}
 459
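/*
 * decode_cd - decode a context descriptor into the stage-1 translation
 * parameters of @cfg (TTB, TSZ, granule size, ASID, ...); returns -EINVAL
 * and reports a C_BAD_CD event for unsupported or inconsistent fields.
 */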
 460static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
 461{
 462    int ret = -EINVAL;
 463    int i;
 464
 465    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
 466        goto bad_cd;
 467    }
 468    if (!CD_A(cd)) {
 469        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
 470    }
 471    if (CD_S(cd)) {
 472        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
 473    }
 474    if (CD_HA(cd) || CD_HD(cd)) {
 475        goto bad_cd; /* HTTU = 0 */
 476    }
 477
  478    /* we support only AArch64 stage-1 translation at the moment */
 479    cfg->aa64 = true;
 480    cfg->stage = 1;
 481
 482    cfg->oas = oas2bits(CD_IPS(cd));
 483    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
 484    cfg->tbi = CD_TBI(cd);
 485    cfg->asid = CD_ASID(cd);
 486
 487    trace_smmuv3_decode_cd(cfg->oas);
 488
 489    /* decode data dependent on TT */
 490    for (i = 0; i <= 1; i++) {
 491        int tg, tsz;
 492        SMMUTransTableInfo *tt = &cfg->tt[i];
 493
 494        cfg->tt[i].disabled = CD_EPD(cd, i);
 495        if (cfg->tt[i].disabled) {
 496            continue;
 497        }
 498
 499        tsz = CD_TSZ(cd, i);
 500        if (tsz < 16 || tsz > 39) {
 501            goto bad_cd;
 502        }
 503
 504        tg = CD_TG(cd, i);
 505        tt->granule_sz = tg2granule(tg, i);
 506        if ((tt->granule_sz != 12 && tt->granule_sz != 14 &&
 507             tt->granule_sz != 16) || CD_ENDI(cd)) {
 508            goto bad_cd;
 509        }
 510
 511        tt->tsz = tsz;
 512        tt->ttb = CD_TTB(cd, i);
 513        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
 514            goto bad_cd;
 515        }
 516        tt->had = CD_HAD(cd, i);
 517        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz, tt->had);
 518    }
 519
 520    event->record_trans_faults = CD_R(cd);
 521
 522    return 0;
 523
 524bad_cd:
 525    event->type = SMMU_EVT_C_BAD_CD;
 526    return ret;
 527}
 528
 529/**
 530 * smmuv3_decode_config - Prepare the translation configuration
 531 * for the @mr iommu region
 532 * @mr: iommu memory region the translation config must be prepared for
 533 * @cfg: output translation configuration which is populated through
 534 *       the different configuration decoding steps
 535 * @event: must be zero'ed by the caller
 536 *
 537 * return < 0 in case of config decoding error (@event is filled
 538 * accordingly). Return 0 otherwise.
 539 */
 540static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
 541                                SMMUEventInfo *event)
 542{
 543    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
 544    uint32_t sid = smmu_get_sid(sdev);
 545    SMMUv3State *s = sdev->smmu;
 546    int ret;
 547    STE ste;
 548    CD cd;
 549
 550    ret = smmu_find_ste(s, sid, &ste, event);
 551    if (ret) {
 552        return ret;
 553    }
 554
 555    ret = decode_ste(s, cfg, &ste, event);
 556    if (ret) {
 557        return ret;
 558    }
 559
 560    if (cfg->aborted || cfg->bypassed) {
 561        return 0;
 562    }
 563
 564    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
 565    if (ret) {
 566        return ret;
 567    }
 568
 569    return decode_cd(cfg, &cd, event);
 570}
 571
 572/**
  573 * smmuv3_get_config - Look up a cached copy of the configuration data for
  574 * @sdev; on a cache miss, perform a configuration structure decoding from
  575 * guest RAM.
 576 *
 577 * @sdev: SMMUDevice handle
 578 * @event: output event info
 579 *
 580 * The configuration cache contains data resulting from both STE and CD
  581 * decoding, in the form of an SMMUTransCfg struct. The hash table is indexed
 582 * by the SMMUDevice handle.
 583 */
 584static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
 585{
 586    SMMUv3State *s = sdev->smmu;
 587    SMMUState *bc = &s->smmu_state;
 588    SMMUTransCfg *cfg;
 589
 590    cfg = g_hash_table_lookup(bc->configs, sdev);
 591    if (cfg) {
 592        sdev->cfg_cache_hits++;
 593        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
 594                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
 595                            100 * sdev->cfg_cache_hits /
 596                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
 597    } else {
 598        sdev->cfg_cache_misses++;
 599        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
 600                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
 601                            100 * sdev->cfg_cache_hits /
 602                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
 603        cfg = g_new0(SMMUTransCfg, 1);
 604
 605        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
 606            g_hash_table_insert(bc->configs, sdev, cfg);
 607        } else {
 608            g_free(cfg);
 609            cfg = NULL;
 610        }
 611    }
 612    return cfg;
 613}
 614
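/* Remove the cached config for @sdev, if any, from the config cache */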
 615static void smmuv3_flush_config(SMMUDevice *sdev)
 616{
 617    SMMUv3State *s = sdev->smmu;
 618    SMMUState *bc = &s->smmu_state;
 619
 620    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
 621    g_hash_table_remove(bc->configs, sdev);
 622}
 623
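/*
 * smmuv3_translate - IOMMU memory region translate callback: looks up the
 * cached config and IOTLB, falls back to a page table walk, and records
 * an event when the translation fails.
 */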
 624static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
 625                                      IOMMUAccessFlags flag, int iommu_idx)
 626{
 627    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
 628    SMMUv3State *s = sdev->smmu;
 629    uint32_t sid = smmu_get_sid(sdev);
 630    SMMUEventInfo event = {.type = SMMU_EVT_NONE,
 631                           .sid = sid,
 632                           .inval_ste_allowed = false};
 633    SMMUPTWEventInfo ptw_info = {};
 634    SMMUTranslationStatus status;
 635    SMMUState *bs = ARM_SMMU(s);
 636    uint64_t page_mask, aligned_addr;
 637    SMMUTLBEntry *cached_entry = NULL;
 638    SMMUTransTableInfo *tt;
 639    SMMUTransCfg *cfg = NULL;
 640    IOMMUTLBEntry entry = {
 641        .target_as = &address_space_memory,
 642        .iova = addr,
 643        .translated_addr = addr,
 644        .addr_mask = ~(hwaddr)0,
 645        .perm = IOMMU_NONE,
 646    };
 647
 648    qemu_mutex_lock(&s->mutex);
 649
 650    if (!smmu_enabled(s)) {
 651        status = SMMU_TRANS_DISABLE;
 652        goto epilogue;
 653    }
 654
 655    cfg = smmuv3_get_config(sdev, &event);
 656    if (!cfg) {
 657        status = SMMU_TRANS_ERROR;
 658        goto epilogue;
 659    }
 660
 661    if (cfg->aborted) {
 662        status = SMMU_TRANS_ABORT;
 663        goto epilogue;
 664    }
 665
 666    if (cfg->bypassed) {
 667        status = SMMU_TRANS_BYPASS;
 668        goto epilogue;
 669    }
 670
 671    tt = select_tt(cfg, addr);
 672    if (!tt) {
 673        if (event.record_trans_faults) {
 674            event.type = SMMU_EVT_F_TRANSLATION;
 675            event.u.f_translation.addr = addr;
 676            event.u.f_translation.rnw = flag & 0x1;
 677        }
 678        status = SMMU_TRANS_ERROR;
 679        goto epilogue;
 680    }
 681
 682    page_mask = (1ULL << (tt->granule_sz)) - 1;
 683    aligned_addr = addr & ~page_mask;
 684
 685    cached_entry = smmu_iotlb_lookup(bs, cfg, tt, aligned_addr);
 686    if (cached_entry) {
 687        if ((flag & IOMMU_WO) && !(cached_entry->entry.perm & IOMMU_WO)) {
 688            status = SMMU_TRANS_ERROR;
 689            if (event.record_trans_faults) {
 690                event.type = SMMU_EVT_F_PERMISSION;
 691                event.u.f_permission.addr = addr;
 692                event.u.f_permission.rnw = flag & 0x1;
 693            }
 694        } else {
 695            status = SMMU_TRANS_SUCCESS;
 696        }
 697        goto epilogue;
 698    }
 699
 700    cached_entry = g_new0(SMMUTLBEntry, 1);
 701
 702    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
 703        g_free(cached_entry);
 704        switch (ptw_info.type) {
 705        case SMMU_PTW_ERR_WALK_EABT:
 706            event.type = SMMU_EVT_F_WALK_EABT;
 707            event.u.f_walk_eabt.addr = addr;
 708            event.u.f_walk_eabt.rnw = flag & 0x1;
 709            event.u.f_walk_eabt.class = 0x1;
 710            event.u.f_walk_eabt.addr2 = ptw_info.addr;
 711            break;
 712        case SMMU_PTW_ERR_TRANSLATION:
 713            if (event.record_trans_faults) {
 714                event.type = SMMU_EVT_F_TRANSLATION;
 715                event.u.f_translation.addr = addr;
 716                event.u.f_translation.rnw = flag & 0x1;
 717            }
 718            break;
 719        case SMMU_PTW_ERR_ADDR_SIZE:
 720            if (event.record_trans_faults) {
 721                event.type = SMMU_EVT_F_ADDR_SIZE;
 722                event.u.f_addr_size.addr = addr;
 723                event.u.f_addr_size.rnw = flag & 0x1;
 724            }
 725            break;
 726        case SMMU_PTW_ERR_ACCESS:
 727            if (event.record_trans_faults) {
 728                event.type = SMMU_EVT_F_ACCESS;
 729                event.u.f_access.addr = addr;
 730                event.u.f_access.rnw = flag & 0x1;
 731            }
 732            break;
 733        case SMMU_PTW_ERR_PERMISSION:
 734            if (event.record_trans_faults) {
 735                event.type = SMMU_EVT_F_PERMISSION;
 736                event.u.f_permission.addr = addr;
 737                event.u.f_permission.rnw = flag & 0x1;
 738            }
 739            break;
 740        default:
 741            g_assert_not_reached();
 742        }
 743        status = SMMU_TRANS_ERROR;
 744    } else {
 745        smmu_iotlb_insert(bs, cfg, cached_entry);
 746        status = SMMU_TRANS_SUCCESS;
 747    }
 748
 749epilogue:
 750    qemu_mutex_unlock(&s->mutex);
 751    switch (status) {
 752    case SMMU_TRANS_SUCCESS:
 753        entry.perm = flag;
 754        entry.translated_addr = cached_entry->entry.translated_addr +
 755                                    (addr & cached_entry->entry.addr_mask);
 756        entry.addr_mask = cached_entry->entry.addr_mask;
 757        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
 758                                       entry.translated_addr, entry.perm);
 759        break;
 760    case SMMU_TRANS_DISABLE:
 761        entry.perm = flag;
 762        entry.addr_mask = ~TARGET_PAGE_MASK;
 763        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
 764                                      entry.perm);
 765        break;
 766    case SMMU_TRANS_BYPASS:
 767        entry.perm = flag;
 768        entry.addr_mask = ~TARGET_PAGE_MASK;
 769        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
 770                                      entry.perm);
 771        break;
 772    case SMMU_TRANS_ABORT:
 773        /* no event is recorded on abort */
 774        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
 775                                     entry.perm);
 776        break;
 777    case SMMU_TRANS_ERROR:
 778        qemu_log_mask(LOG_GUEST_ERROR,
 779                      "%s translation failed for iova=0x%"PRIx64"(%s)\n",
 780                      mr->parent_obj.name, addr, smmu_event_string(event.type));
 781        smmuv3_record_event(s, &event);
 782        break;
 783    }
 784
 785    return entry;
 786}
 787
 788/**
 789 * smmuv3_notify_iova - call the notifier @n for a given
 790 * @asid and @iova tuple.
 791 *
 792 * @mr: IOMMU mr region handle
 793 * @n: notifier to be called
 794 * @asid: address space ID or negative value if we don't care
 795 * @iova: iova
 796 * @tg: translation granule (if communicated through range invalidation)
 797 * @num_pages: number of @granule sized pages (if tg != 0), otherwise 1
 798 */
 799static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
 800                               IOMMUNotifier *n,
 801                               int asid, dma_addr_t iova,
 802                               uint8_t tg, uint64_t num_pages)
 803{
 804    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
 805    IOMMUTLBEvent event;
 806    uint8_t granule;
 807
 808    if (!tg) {
 809        SMMUEventInfo event = {.inval_ste_allowed = true};
 810        SMMUTransCfg *cfg = smmuv3_get_config(sdev, &event);
 811        SMMUTransTableInfo *tt;
 812
 813        if (!cfg) {
 814            return;
 815        }
 816
 817        if (asid >= 0 && cfg->asid != asid) {
 818            return;
 819        }
 820
 821        tt = select_tt(cfg, iova);
 822        if (!tt) {
 823            return;
 824        }
 825        granule = tt->granule_sz;
 826    } else {
 827        granule = tg * 2 + 10;
 828    }
 829
 830    event.type = IOMMU_NOTIFIER_UNMAP;
 831    event.entry.target_as = &address_space_memory;
 832    event.entry.iova = iova;
 833    event.entry.addr_mask = num_pages * (1 << granule) - 1;
 834    event.entry.perm = IOMMU_NONE;
 835
 836    memory_region_notify_iommu_one(n, &event);
 837}
 838
 839/* invalidate an asid/iova range tuple in all mr's */
 840static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova,
 841                                      uint8_t tg, uint64_t num_pages)
 842{
 843    SMMUDevice *sdev;
 844
 845    QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) {
 846        IOMMUMemoryRegion *mr = &sdev->iommu;
 847        IOMMUNotifier *n;
 848
 849        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova,
 850                                        tg, num_pages);
 851
 852        IOMMU_NOTIFIER_FOREACH(n, mr) {
 853            smmuv3_notify_iova(mr, n, asid, iova, tg, num_pages);
 854        }
 855    }
 856}
 857
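/*
 * Handle the range invalidation commands (CMD_TLBI_NH_VA/VAA): notify the
 * registered IOMMU notifiers and invalidate the matching IOTLB entries.
 */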
 858static void smmuv3_s1_range_inval(SMMUState *s, Cmd *cmd)
 859{
 860    dma_addr_t end, addr = CMD_ADDR(cmd);
 861    uint8_t type = CMD_TYPE(cmd);
 862    uint16_t vmid = CMD_VMID(cmd);
 863    uint8_t scale = CMD_SCALE(cmd);
 864    uint8_t num = CMD_NUM(cmd);
 865    uint8_t ttl = CMD_TTL(cmd);
 866    bool leaf = CMD_LEAF(cmd);
 867    uint8_t tg = CMD_TG(cmd);
 868    uint64_t num_pages;
 869    uint8_t granule;
 870    int asid = -1;
 871
 872    if (type == SMMU_CMD_TLBI_NH_VA) {
 873        asid = CMD_ASID(cmd);
 874    }
 875
 876    if (!tg) {
 877        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, 1, ttl, leaf);
 878        smmuv3_inv_notifiers_iova(s, asid, addr, tg, 1);
 879        smmu_iotlb_inv_iova(s, asid, addr, tg, 1, ttl);
 880        return;
 881    }
 882
 883    /* RIL in use */
 884
 885    num_pages = (num + 1) * BIT_ULL(scale);
 886    granule = tg * 2 + 10;
 887
  888    /* Split invalidations into power-of-2 range invalidations */
 889    end = addr + (num_pages << granule) - 1;
 890
 891    while (addr != end + 1) {
 892        uint64_t mask = dma_aligned_pow2_mask(addr, end, 64);
 893
 894        num_pages = (mask + 1) >> granule;
 895        trace_smmuv3_s1_range_inval(vmid, asid, addr, tg, num_pages, ttl, leaf);
 896        smmuv3_inv_notifiers_iova(s, asid, addr, tg, num_pages);
 897        smmu_iotlb_inv_iova(s, asid, addr, tg, num_pages, ttl);
 898        addr += mask + 1;
 899    }
 900}
 901
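/*
 * g_hash_table_foreach_remove() callback: return true (i.e. drop the
 * cached config) when the device's SID falls within the invalidated range.
 */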
 902static gboolean
 903smmuv3_invalidate_ste(gpointer key, gpointer value, gpointer user_data)
 904{
 905    SMMUDevice *sdev = (SMMUDevice *)key;
 906    uint32_t sid = smmu_get_sid(sdev);
 907    SMMUSIDRange *sid_range = (SMMUSIDRange *)user_data;
 908
 909    if (sid < sid_range->start || sid > sid_range->end) {
 910        return false;
 911    }
 912    trace_smmuv3_config_cache_inv(sid);
 913    return true;
 914}
 915
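/*
 * smmuv3_cmdq_consume - dequeue and execute commands from the command
 * queue until it is empty or an error occurs; errors are reported through
 * the CMDQ_ERR code and a GERROR interrupt.
 */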
 916static int smmuv3_cmdq_consume(SMMUv3State *s)
 917{
 918    SMMUState *bs = ARM_SMMU(s);
 919    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
 920    SMMUQueue *q = &s->cmdq;
 921    SMMUCommandType type = 0;
 922
 923    if (!smmuv3_cmdq_enabled(s)) {
 924        return 0;
 925    }
 926    /*
  927     * Some commands depend on register values, typically CR0. If those
  928     * register values change while a command is being handled, the spec
  929     * says it is UNPREDICTABLE whether the command is interpreted under
  930     * the new or the old value.
 931     */
 932
 933    while (!smmuv3_q_empty(q)) {
 934        uint32_t pending = s->gerror ^ s->gerrorn;
 935        Cmd cmd;
 936
 937        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
 938                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));
 939
 940        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
 941            break;
 942        }
 943
 944        if (queue_read(q, &cmd) != MEMTX_OK) {
 945            cmd_error = SMMU_CERROR_ABT;
 946            break;
 947        }
 948
 949        type = CMD_TYPE(&cmd);
 950
 951        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));
 952
 953        qemu_mutex_lock(&s->mutex);
 954        switch (type) {
 955        case SMMU_CMD_SYNC:
 956            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
 957                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
 958            }
 959            break;
 960        case SMMU_CMD_PREFETCH_CONFIG:
 961        case SMMU_CMD_PREFETCH_ADDR:
 962            break;
 963        case SMMU_CMD_CFGI_STE:
 964        {
 965            uint32_t sid = CMD_SID(&cmd);
 966            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
 967            SMMUDevice *sdev;
 968
 969            if (CMD_SSEC(&cmd)) {
 970                cmd_error = SMMU_CERROR_ILL;
 971                break;
 972            }
 973
 974            if (!mr) {
 975                break;
 976            }
 977
 978            trace_smmuv3_cmdq_cfgi_ste(sid);
 979            sdev = container_of(mr, SMMUDevice, iommu);
 980            smmuv3_flush_config(sdev);
 981
 982            break;
 983        }
 984        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
 985        {
 986            uint32_t sid = CMD_SID(&cmd), mask;
 987            uint8_t range = CMD_STE_RANGE(&cmd);
 988            SMMUSIDRange sid_range;
 989
 990            if (CMD_SSEC(&cmd)) {
 991                cmd_error = SMMU_CERROR_ILL;
 992                break;
 993            }
 994
 995            mask = (1ULL << (range + 1)) - 1;
 996            sid_range.start = sid & ~mask;
 997            sid_range.end = sid_range.start + mask;
 998
 999            trace_smmuv3_cmdq_cfgi_ste_range(sid_range.start, sid_range.end);
1000            g_hash_table_foreach_remove(bs->configs, smmuv3_invalidate_ste,
1001                                        &sid_range);
1002            break;
1003        }
1004        case SMMU_CMD_CFGI_CD:
1005        case SMMU_CMD_CFGI_CD_ALL:
1006        {
1007            uint32_t sid = CMD_SID(&cmd);
1008            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
1009            SMMUDevice *sdev;
1010
1011            if (CMD_SSEC(&cmd)) {
1012                cmd_error = SMMU_CERROR_ILL;
1013                break;
1014            }
1015
1016            if (!mr) {
1017                break;
1018            }
1019
1020            trace_smmuv3_cmdq_cfgi_cd(sid);
1021            sdev = container_of(mr, SMMUDevice, iommu);
1022            smmuv3_flush_config(sdev);
1023            break;
1024        }
1025        case SMMU_CMD_TLBI_NH_ASID:
1026        {
1027            uint16_t asid = CMD_ASID(&cmd);
1028
1029            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
1030            smmu_inv_notifiers_all(&s->smmu_state);
1031            smmu_iotlb_inv_asid(bs, asid);
1032            break;
1033        }
1034        case SMMU_CMD_TLBI_NH_ALL:
1035        case SMMU_CMD_TLBI_NSNH_ALL:
1036            trace_smmuv3_cmdq_tlbi_nh();
1037            smmu_inv_notifiers_all(&s->smmu_state);
1038            smmu_iotlb_inv_all(bs);
1039            break;
1040        case SMMU_CMD_TLBI_NH_VAA:
1041        case SMMU_CMD_TLBI_NH_VA:
1042            smmuv3_s1_range_inval(bs, &cmd);
1043            break;
1044        case SMMU_CMD_TLBI_EL3_ALL:
1045        case SMMU_CMD_TLBI_EL3_VA:
1046        case SMMU_CMD_TLBI_EL2_ALL:
1047        case SMMU_CMD_TLBI_EL2_ASID:
1048        case SMMU_CMD_TLBI_EL2_VA:
1049        case SMMU_CMD_TLBI_EL2_VAA:
1050        case SMMU_CMD_TLBI_S12_VMALL:
1051        case SMMU_CMD_TLBI_S2_IPA:
1052        case SMMU_CMD_ATC_INV:
1053        case SMMU_CMD_PRI_RESP:
1054        case SMMU_CMD_RESUME:
1055        case SMMU_CMD_STALL_TERM:
1056            trace_smmuv3_unhandled_cmd(type);
1057            break;
1058        default:
1059            cmd_error = SMMU_CERROR_ILL;
1060            qemu_log_mask(LOG_GUEST_ERROR,
1061                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
1062            break;
1063        }
1064        qemu_mutex_unlock(&s->mutex);
1065        if (cmd_error) {
1066            break;
1067        }
1068        /*
 1069         * We only increment the cons index after the command has
 1070         * completed, because the SYNC command returns immediately and
 1071         * does not check the completion of previous commands.
1072         */
1073        queue_cons_incr(q);
1074    }
1075
1076    if (cmd_error) {
1077        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
1078        smmu_write_cmdq_err(s, cmd_error);
1079        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
1080    }
1081
1082    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
1083                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));
1084
1085    return 0;
1086}
1087
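/* Handle 64-bit writes to the programming interface registers */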
1088static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
1089                               uint64_t data, MemTxAttrs attrs)
1090{
1091    switch (offset) {
1092    case A_GERROR_IRQ_CFG0:
1093        s->gerror_irq_cfg0 = data;
1094        return MEMTX_OK;
1095    case A_STRTAB_BASE:
1096        s->strtab_base = data;
1097        return MEMTX_OK;
1098    case A_CMDQ_BASE:
1099        s->cmdq.base = data;
1100        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
1101        if (s->cmdq.log2size > SMMU_CMDQS) {
1102            s->cmdq.log2size = SMMU_CMDQS;
1103        }
1104        return MEMTX_OK;
1105    case A_EVENTQ_BASE:
1106        s->eventq.base = data;
1107        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
1108        if (s->eventq.log2size > SMMU_EVENTQS) {
1109            s->eventq.log2size = SMMU_EVENTQS;
1110        }
1111        return MEMTX_OK;
1112    case A_EVENTQ_IRQ_CFG0:
1113        s->eventq_irq_cfg0 = data;
1114        return MEMTX_OK;
1115    default:
1116        qemu_log_mask(LOG_UNIMP,
1117                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
1118                      __func__, offset);
1119        return MEMTX_OK;
1120    }
1121}
1122
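/* Handle 32-bit writes to the programming interface registers */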
1123static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
1124                               uint64_t data, MemTxAttrs attrs)
1125{
1126    switch (offset) {
1127    case A_CR0:
1128        s->cr[0] = data;
1129        s->cr0ack = data & ~SMMU_CR0_RESERVED;
1130        /* in case the command queue has been enabled */
1131        smmuv3_cmdq_consume(s);
1132        return MEMTX_OK;
1133    case A_CR1:
1134        s->cr[1] = data;
1135        return MEMTX_OK;
1136    case A_CR2:
1137        s->cr[2] = data;
1138        return MEMTX_OK;
1139    case A_IRQ_CTRL:
1140        s->irq_ctrl = data;
1141        return MEMTX_OK;
1142    case A_GERRORN:
1143        smmuv3_write_gerrorn(s, data);
1144        /*
 1145         * By acknowledging the CMDQ_ERR, SW signals that commands can
 1146         * be processed again
1147         */
1148        smmuv3_cmdq_consume(s);
1149        return MEMTX_OK;
1150    case A_GERROR_IRQ_CFG0: /* 64b */
1151        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
1152        return MEMTX_OK;
1153    case A_GERROR_IRQ_CFG0 + 4:
1154        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
1155        return MEMTX_OK;
1156    case A_GERROR_IRQ_CFG1:
1157        s->gerror_irq_cfg1 = data;
1158        return MEMTX_OK;
1159    case A_GERROR_IRQ_CFG2:
1160        s->gerror_irq_cfg2 = data;
1161        return MEMTX_OK;
1162    case A_STRTAB_BASE: /* 64b */
1163        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
1164        return MEMTX_OK;
1165    case A_STRTAB_BASE + 4:
1166        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
1167        return MEMTX_OK;
1168    case A_STRTAB_BASE_CFG:
1169        s->strtab_base_cfg = data;
1170        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
1171            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
1172            s->features |= SMMU_FEATURE_2LVL_STE;
1173        }
1174        return MEMTX_OK;
1175    case A_CMDQ_BASE: /* 64b */
1176        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
1177        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
1178        if (s->cmdq.log2size > SMMU_CMDQS) {
1179            s->cmdq.log2size = SMMU_CMDQS;
1180        }
1181        return MEMTX_OK;
1182    case A_CMDQ_BASE + 4: /* 64b */
1183        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
1184        return MEMTX_OK;
1185    case A_CMDQ_PROD:
1186        s->cmdq.prod = data;
1187        smmuv3_cmdq_consume(s);
1188        return MEMTX_OK;
1189    case A_CMDQ_CONS:
1190        s->cmdq.cons = data;
1191        return MEMTX_OK;
1192    case A_EVENTQ_BASE: /* 64b */
1193        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
1194        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
1195        if (s->eventq.log2size > SMMU_EVENTQS) {
1196            s->eventq.log2size = SMMU_EVENTQS;
1197        }
1198        return MEMTX_OK;
1199    case A_EVENTQ_BASE + 4:
1200        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
1201        return MEMTX_OK;
1202    case A_EVENTQ_PROD:
1203        s->eventq.prod = data;
1204        return MEMTX_OK;
1205    case A_EVENTQ_CONS:
1206        s->eventq.cons = data;
1207        return MEMTX_OK;
1208    case A_EVENTQ_IRQ_CFG0: /* 64b */
1209        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
1210        return MEMTX_OK;
1211    case A_EVENTQ_IRQ_CFG0 + 4:
1212        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
1213        return MEMTX_OK;
1214    case A_EVENTQ_IRQ_CFG1:
1215        s->eventq_irq_cfg1 = data;
1216        return MEMTX_OK;
1217    case A_EVENTQ_IRQ_CFG2:
1218        s->eventq_irq_cfg2 = data;
1219        return MEMTX_OK;
1220    default:
1221        qemu_log_mask(LOG_UNIMP,
1222                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
1223                      __func__, offset);
1224        return MEMTX_OK;
1225    }
1226}
1227
1228static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
1229                                   unsigned size, MemTxAttrs attrs)
1230{
1231    SMMUState *sys = opaque;
1232    SMMUv3State *s = ARM_SMMUV3(sys);
1233    MemTxResult r;
1234
1235    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
1236    offset &= ~0x10000;
1237
1238    switch (size) {
1239    case 8:
1240        r = smmu_writell(s, offset, data, attrs);
1241        break;
1242    case 4:
1243        r = smmu_writel(s, offset, data, attrs);
1244        break;
1245    default:
1246        r = MEMTX_ERROR;
1247        break;
1248    }
1249
1250    trace_smmuv3_write_mmio(offset, data, size, r);
1251    return r;
1252}
1253
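/* Handle 64-bit reads of the programming interface registers */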
1254static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
1255                               uint64_t *data, MemTxAttrs attrs)
1256{
1257    switch (offset) {
1258    case A_GERROR_IRQ_CFG0:
1259        *data = s->gerror_irq_cfg0;
1260        return MEMTX_OK;
1261    case A_STRTAB_BASE:
1262        *data = s->strtab_base;
1263        return MEMTX_OK;
1264    case A_CMDQ_BASE:
1265        *data = s->cmdq.base;
1266        return MEMTX_OK;
1267    case A_EVENTQ_BASE:
1268        *data = s->eventq.base;
1269        return MEMTX_OK;
1270    default:
1271        *data = 0;
1272        qemu_log_mask(LOG_UNIMP,
1273                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
1274                      __func__, offset);
1275        return MEMTX_OK;
1276    }
1277}
1278
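/* Handle 32-bit reads of the programming interface registers */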
1279static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
1280                              uint64_t *data, MemTxAttrs attrs)
1281{
1282    switch (offset) {
1283    case A_IDREGS ... A_IDREGS + 0x2f:
1284        *data = smmuv3_idreg(offset - A_IDREGS);
1285        return MEMTX_OK;
1286    case A_IDR0 ... A_IDR5:
1287        *data = s->idr[(offset - A_IDR0) / 4];
1288        return MEMTX_OK;
1289    case A_IIDR:
1290        *data = s->iidr;
1291        return MEMTX_OK;
1292    case A_AIDR:
1293        *data = s->aidr;
1294        return MEMTX_OK;
1295    case A_CR0:
1296        *data = s->cr[0];
1297        return MEMTX_OK;
1298    case A_CR0ACK:
1299        *data = s->cr0ack;
1300        return MEMTX_OK;
1301    case A_CR1:
1302        *data = s->cr[1];
1303        return MEMTX_OK;
1304    case A_CR2:
1305        *data = s->cr[2];
1306        return MEMTX_OK;
1307    case A_STATUSR:
1308        *data = s->statusr;
1309        return MEMTX_OK;
1310    case A_IRQ_CTRL:
1311    case A_IRQ_CTRL_ACK:
1312        *data = s->irq_ctrl;
1313        return MEMTX_OK;
1314    case A_GERROR:
1315        *data = s->gerror;
1316        return MEMTX_OK;
1317    case A_GERRORN:
1318        *data = s->gerrorn;
1319        return MEMTX_OK;
1320    case A_GERROR_IRQ_CFG0: /* 64b */
1321        *data = extract64(s->gerror_irq_cfg0, 0, 32);
1322        return MEMTX_OK;
1323    case A_GERROR_IRQ_CFG0 + 4:
1324        *data = extract64(s->gerror_irq_cfg0, 32, 32);
1325        return MEMTX_OK;
1326    case A_GERROR_IRQ_CFG1:
1327        *data = s->gerror_irq_cfg1;
1328        return MEMTX_OK;
1329    case A_GERROR_IRQ_CFG2:
1330        *data = s->gerror_irq_cfg2;
1331        return MEMTX_OK;
1332    case A_STRTAB_BASE: /* 64b */
1333        *data = extract64(s->strtab_base, 0, 32);
1334        return MEMTX_OK;
1335    case A_STRTAB_BASE + 4: /* 64b */
1336        *data = extract64(s->strtab_base, 32, 32);
1337        return MEMTX_OK;
1338    case A_STRTAB_BASE_CFG:
1339        *data = s->strtab_base_cfg;
1340        return MEMTX_OK;
1341    case A_CMDQ_BASE: /* 64b */
1342        *data = extract64(s->cmdq.base, 0, 32);
1343        return MEMTX_OK;
1344    case A_CMDQ_BASE + 4:
1345        *data = extract64(s->cmdq.base, 32, 32);
1346        return MEMTX_OK;
1347    case A_CMDQ_PROD:
1348        *data = s->cmdq.prod;
1349        return MEMTX_OK;
1350    case A_CMDQ_CONS:
1351        *data = s->cmdq.cons;
1352        return MEMTX_OK;
1353    case A_EVENTQ_BASE: /* 64b */
1354        *data = extract64(s->eventq.base, 0, 32);
1355        return MEMTX_OK;
1356    case A_EVENTQ_BASE + 4: /* 64b */
1357        *data = extract64(s->eventq.base, 32, 32);
1358        return MEMTX_OK;
1359    case A_EVENTQ_PROD:
1360        *data = s->eventq.prod;
1361        return MEMTX_OK;
1362    case A_EVENTQ_CONS:
1363        *data = s->eventq.cons;
1364        return MEMTX_OK;
1365    default:
1366        *data = 0;
1367        qemu_log_mask(LOG_UNIMP,
1368                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
1369                      __func__, offset);
1370        return MEMTX_OK;
1371    }
1372}
1373
1374static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
1375                                  unsigned size, MemTxAttrs attrs)
1376{
1377    SMMUState *sys = opaque;
1378    SMMUv3State *s = ARM_SMMUV3(sys);
1379    MemTxResult r;
1380
1381    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
1382    offset &= ~0x10000;
1383
1384    switch (size) {
1385    case 8:
1386        r = smmu_readll(s, offset, data, attrs);
1387        break;
1388    case 4:
1389        r = smmu_readl(s, offset, data, attrs);
1390        break;
1391    default:
1392        r = MEMTX_ERROR;
1393        break;
1394    }
1395
1396    trace_smmuv3_read_mmio(offset, *data, size, r);
1397    return r;
1398}
1399
1400static const MemoryRegionOps smmu_mem_ops = {
1401    .read_with_attrs = smmu_read_mmio,
1402    .write_with_attrs = smmu_write_mmio,
1403    .endianness = DEVICE_LITTLE_ENDIAN,
1404    .valid = {
1405        .min_access_size = 4,
1406        .max_access_size = 8,
1407    },
1408    .impl = {
1409        .min_access_size = 4,
1410        .max_access_size = 8,
1411    },
1412};
1413
1414static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
1415{
1416    int i;
1417
1418    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
1419        sysbus_init_irq(dev, &s->irq[i]);
1420    }
1421}
1422
1423static void smmu_reset(DeviceState *dev)
1424{
1425    SMMUv3State *s = ARM_SMMUV3(dev);
1426    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
1427
1428    c->parent_reset(dev);
1429
1430    smmuv3_init_regs(s);
1431}
1432
1433static void smmu_realize(DeviceState *d, Error **errp)
1434{
1435    SMMUState *sys = ARM_SMMU(d);
1436    SMMUv3State *s = ARM_SMMUV3(sys);
1437    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
1438    SysBusDevice *dev = SYS_BUS_DEVICE(d);
1439    Error *local_err = NULL;
1440
1441    c->parent_realize(d, &local_err);
1442    if (local_err) {
1443        error_propagate(errp, local_err);
1444        return;
1445    }
1446
1447    qemu_mutex_init(&s->mutex);
1448
1449    memory_region_init_io(&sys->iomem, OBJECT(s),
1450                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);
1451
1452    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;
1453
1454    sysbus_init_mmio(dev, &sys->iomem);
1455
1456    smmu_init_irq(s, dev);
1457}
1458
1459static const VMStateDescription vmstate_smmuv3_queue = {
1460    .name = "smmuv3_queue",
1461    .version_id = 1,
1462    .minimum_version_id = 1,
1463    .fields = (VMStateField[]) {
1464        VMSTATE_UINT64(base, SMMUQueue),
1465        VMSTATE_UINT32(prod, SMMUQueue),
1466        VMSTATE_UINT32(cons, SMMUQueue),
1467        VMSTATE_UINT8(log2size, SMMUQueue),
1468        VMSTATE_END_OF_LIST(),
1469    },
1470};
1471
1472static const VMStateDescription vmstate_smmuv3 = {
1473    .name = "smmuv3",
1474    .version_id = 1,
1475    .minimum_version_id = 1,
1476    .priority = MIG_PRI_IOMMU,
1477    .fields = (VMStateField[]) {
1478        VMSTATE_UINT32(features, SMMUv3State),
1479        VMSTATE_UINT8(sid_size, SMMUv3State),
1480        VMSTATE_UINT8(sid_split, SMMUv3State),
1481
1482        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
1483        VMSTATE_UINT32(cr0ack, SMMUv3State),
1484        VMSTATE_UINT32(statusr, SMMUv3State),
1485        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
1486        VMSTATE_UINT32(gerror, SMMUv3State),
1487        VMSTATE_UINT32(gerrorn, SMMUv3State),
1488        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
1489        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
1490        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
1491        VMSTATE_UINT64(strtab_base, SMMUv3State),
1492        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
1493        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
1494        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
1495        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),
1496
1497        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
1498        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
1499
1500        VMSTATE_END_OF_LIST(),
1501    },
1502};
1503
1504static void smmuv3_instance_init(Object *obj)
1505{
1506    /* Nothing much to do here as of now */
1507}
1508
1509static void smmuv3_class_init(ObjectClass *klass, void *data)
1510{
1511    DeviceClass *dc = DEVICE_CLASS(klass);
1512    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);
1513
1514    dc->vmsd = &vmstate_smmuv3;
1515    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
1516    c->parent_realize = dc->realize;
1517    dc->realize = smmu_realize;
1518}
1519
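/*
 * notify_flag_changed callback: only UNMAP notifiers are supported; keep
 * track of the devices that have at least one notifier registered.
 */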
1520static int smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
1521                                      IOMMUNotifierFlag old,
1522                                      IOMMUNotifierFlag new,
1523                                      Error **errp)
1524{
1525    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
1526    SMMUv3State *s3 = sdev->smmu;
1527    SMMUState *s = &(s3->smmu_state);
1528
1529    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
1530        error_setg(errp, "SMMUv3 does not support dev-iotlb yet");
1531        return -EINVAL;
1532    }
1533
1534    if (new & IOMMU_NOTIFIER_MAP) {
1535        error_setg(errp,
1536                   "device %02x.%02x.%x requires iommu MAP notifier which is "
1537                   "not currently supported", pci_bus_num(sdev->bus),
1538                   PCI_SLOT(sdev->devfn), PCI_FUNC(sdev->devfn));
1539        return -EINVAL;
1540    }
1541
1542    if (old == IOMMU_NOTIFIER_NONE) {
1543        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
1544        QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next);
1545    } else if (new == IOMMU_NOTIFIER_NONE) {
1546        trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
1547        QLIST_REMOVE(sdev, next);
1548    }
1549    return 0;
1550}
1551
1552static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
1553                                                  void *data)
1554{
1555    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
1556
1557    imrc->translate = smmuv3_translate;
1558    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
1559}
1560
1561static const TypeInfo smmuv3_type_info = {
1562    .name          = TYPE_ARM_SMMUV3,
1563    .parent        = TYPE_ARM_SMMU,
1564    .instance_size = sizeof(SMMUv3State),
1565    .instance_init = smmuv3_instance_init,
1566    .class_size    = sizeof(SMMUv3Class),
1567    .class_init    = smmuv3_class_init,
1568};
1569
1570static const TypeInfo smmuv3_iommu_memory_region_info = {
1571    .parent = TYPE_IOMMU_MEMORY_REGION,
1572    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
1573    .class_init = smmuv3_iommu_memory_region_class_init,
1574};
1575
1576static void smmuv3_register_types(void)
1577{
1578    type_register(&smmuv3_type_info);
1579    type_register(&smmuv3_iommu_memory_region_info);
1580}
1581
1582type_init(smmuv3_register_types)
1583
1584