qemu/hw/arm/smmuv3.c
/*
 * Copyright (C) 2014-2016 Broadcom Corporation
 * Copyright (c) 2017 Red Hat, Inc.
 * Written by Prem Mallappa, Eric Auger
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/boards.h"
#include "sysemu/sysemu.h"
#include "hw/sysbus.h"
#include "hw/qdev-core.h"
#include "hw/pci/pci.h"
#include "exec/address-spaces.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/log.h"
#include "qemu/error-report.h"
#include "qapi/error.h"

#include "hw/arm/smmuv3.h"
#include "smmuv3-internal.h"

/**
 * smmuv3_trigger_irq - pulse @irq if enabled and update
 * GERROR register in case of GERROR interrupt
 *
 * @irq: irq type
 * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR)
 */
static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq,
                               uint32_t gerror_mask)
{
    bool pulse = false;

    switch (irq) {
    case SMMU_IRQ_EVTQ:
        pulse = smmuv3_eventq_irq_enabled(s);
        break;
    case SMMU_IRQ_PRIQ:
        qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n");
        break;
    case SMMU_IRQ_CMD_SYNC:
        pulse = true;
        break;
    case SMMU_IRQ_GERROR:
    {
        uint32_t pending = s->gerror ^ s->gerrorn;
        uint32_t new_gerrors = ~pending & gerror_mask;

        if (!new_gerrors) {
            /* only toggle non pending errors */
            return;
        }
        s->gerror ^= new_gerrors;
        trace_smmuv3_write_gerror(new_gerrors, s->gerror);

        pulse = smmuv3_gerror_irq_enabled(s);
        break;
    }
    }
    if (pulse) {
        trace_smmuv3_trigger_irq(irq);
        qemu_irq_pulse(s->irq[irq]);
    }
}

static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn)
{
    uint32_t pending = s->gerror ^ s->gerrorn;
    uint32_t toggled = s->gerrorn ^ new_gerrorn;

    if (toggled & ~pending) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "guest toggles non pending errors = 0x%x\n",
                      toggled & ~pending);
    }

    /*
     * We do not raise any error in case guest toggles bits corresponding
     * to not active IRQs (CONSTRAINED UNPREDICTABLE)
     */
    s->gerrorn = new_gerrorn;

    trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn);
}

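/*
 * Circular queue helpers: queue_read() fetches the entry at the current
 * consumer index of @q while queue_write() stores an entry at the current
 * producer index and advances the producer index on success. Both use the
 * global address_space_memory for the DMA accesses.
 */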
static inline MemTxResult queue_read(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_CONS_ENTRY(q);

    return dma_memory_read(&address_space_memory, addr, data, q->entry_size);
}

static MemTxResult queue_write(SMMUQueue *q, void *data)
{
    dma_addr_t addr = Q_PROD_ENTRY(q);
    MemTxResult ret;

    ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size);
    if (ret != MEMTX_OK) {
        return ret;
    }

    queue_prod_incr(q);
    return MEMTX_OK;
}

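/*
 * Push one event record onto the event queue. Returns MEMTX_ERROR if the
 * queue is disabled or already full; otherwise the entry is written and,
 * if the queue is now non-empty, the EVTQ interrupt is raised.
 */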
static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt)
{
    SMMUQueue *q = &s->eventq;
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return MEMTX_ERROR;
    }

    if (smmuv3_q_full(q)) {
        return MEMTX_ERROR;
    }

    r = queue_write(q, evt);
    if (r != MEMTX_OK) {
        return r;
    }

    if (!smmuv3_q_empty(q)) {
        smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0);
    }
    return MEMTX_OK;
}

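/*
 * Translate an SMMUEventInfo into an event queue record: fill in the
 * type-specific fields and attempt to queue it. If the queue write fails,
 * the EVENTQ_ABT_ERR global error is raised instead.
 */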
void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info)
{
    Evt evt = {};
    MemTxResult r;

    if (!smmuv3_eventq_enabled(s)) {
        return;
    }

    EVT_SET_TYPE(&evt, info->type);
    EVT_SET_SID(&evt, info->sid);

    switch (info->type) {
    case SMMU_EVT_NONE:
        return;
    case SMMU_EVT_F_UUT:
        EVT_SET_SSID(&evt, info->u.f_uut.ssid);
        EVT_SET_SSV(&evt,  info->u.f_uut.ssv);
        EVT_SET_ADDR(&evt, info->u.f_uut.addr);
        EVT_SET_RNW(&evt,  info->u.f_uut.rnw);
        EVT_SET_PNU(&evt,  info->u.f_uut.pnu);
        EVT_SET_IND(&evt,  info->u.f_uut.ind);
        break;
    case SMMU_EVT_C_BAD_STREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid);
        EVT_SET_SSV(&evt,  info->u.c_bad_streamid.ssv);
        break;
    case SMMU_EVT_F_STE_FETCH:
        EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid);
        EVT_SET_SSV(&evt,  info->u.f_ste_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_ste_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_STE:
        EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid);
        EVT_SET_SSV(&evt,  info->u.c_bad_ste.ssv);
        break;
    case SMMU_EVT_F_STREAM_DISABLED:
        break;
    case SMMU_EVT_F_TRANS_FORBIDDEN:
        EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr);
        EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw);
        break;
    case SMMU_EVT_C_BAD_SUBSTREAMID:
        EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid);
        break;
    case SMMU_EVT_F_CD_FETCH:
        EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid);
        EVT_SET_SSV(&evt,  info->u.f_cd_fetch.ssv);
        EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr);
        break;
    case SMMU_EVT_C_BAD_CD:
        EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid);
        EVT_SET_SSV(&evt,  info->u.c_bad_cd.ssv);
        break;
    case SMMU_EVT_F_WALK_EABT:
    case SMMU_EVT_F_TRANSLATION:
    case SMMU_EVT_F_ADDR_SIZE:
    case SMMU_EVT_F_ACCESS:
    case SMMU_EVT_F_PERMISSION:
        EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall);
        EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag);
        EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid);
        EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv);
        EVT_SET_S2(&evt, info->u.f_walk_eabt.s2);
        EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr);
        EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw);
        EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu);
        EVT_SET_IND(&evt, info->u.f_walk_eabt.ind);
        EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class);
        EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2);
        break;
    case SMMU_EVT_F_CFG_CONFLICT:
        EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid);
        EVT_SET_SSV(&evt,  info->u.f_cfg_conflict.ssv);
        break;
    /* rest is not implemented */
    case SMMU_EVT_F_BAD_ATS_TREQ:
    case SMMU_EVT_F_TLB_CONFLICT:
    case SMMU_EVT_E_PAGE_REQ:
    default:
        g_assert_not_reached();
    }

    trace_smmuv3_record_event(smmu_event_string(info->type), info->sid);
    r = smmuv3_write_eventq(s, &evt);
    if (r != MEMTX_OK) {
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK);
    }
    info->recorded = true;
}

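/*
 * Reset values of the ID registers and of the queue state: the emulated
 * SMMU advertises stage-1 only translation, AArch64 translation tables,
 * 16-bit ASIDs, 2-level stream tables and 4K/64K granules.
 */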
static void smmuv3_init_regs(SMMUv3State *s)
{
    /**
     * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID,
     *       multi-level stream table
     */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */
    /* terminated transaction will always be aborted/error returned */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1);
    /* 2-level stream table supported */
    s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1);

    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS);
    s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS,   SMMU_CMDQS);

    /* 4K and 64K granule support */
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1);
    s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */

    s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS);
    s->cmdq.prod = 0;
    s->cmdq.cons = 0;
    s->cmdq.entry_size = sizeof(struct Cmd);
    s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS);
    s->eventq.prod = 0;
    s->eventq.cons = 0;
    s->eventq.entry_size = sizeof(struct Evt);

    s->features = 0;
    s->sid_split = 0;
}

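/*
 * Fetch a stream table entry from guest memory at @addr. On a DMA failure
 * an F_STE_FETCH event is prepared in @event and -EINVAL is returned.
 */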
static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf,
                        SMMUEventInfo *event)
{
    int ret;

    trace_smmuv3_get_ste(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_STE_FETCH;
        event->u.f_ste_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* @ssid > 0 not supported yet */
static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid,
                       CD *buf, SMMUEventInfo *event)
{
    dma_addr_t addr = STE_CTXPTR(ste);
    int ret;

    trace_smmuv3_get_cd(addr);
    /* TODO: guarantee 64-bit single-copy atomicity */
    ret = dma_memory_read(&address_space_memory, addr,
                          (void *)buf, sizeof(*buf));
    if (ret != MEMTX_OK) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "Cannot fetch pte at address=0x%"PRIx64"\n", addr);
        event->type = SMMU_EVT_F_CD_FETCH;
        event->u.f_cd_fetch.addr = addr;
        return -EINVAL;
    }
    return 0;
}

/* Returns < 0 in case of invalid STE, 0 otherwise */
static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg,
                      STE *ste, SMMUEventInfo *event)
{
    uint32_t config;

    if (!STE_VALID(ste)) {
        goto bad_ste;
    }

    config = STE_CONFIG(ste);

    if (STE_CFG_ABORT(config)) {
        cfg->aborted = true;
        return 0;
    }

    if (STE_CFG_BYPASS(config)) {
        cfg->bypassed = true;
        return 0;
    }

    if (STE_CFG_S2_ENABLED(config)) {
        qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n");
        goto bad_ste;
    }

    if (STE_S1CDMAX(ste) != 0) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 does not support multiple context descriptors yet\n");
        goto bad_ste;
    }

    if (STE_S1STALLD(ste)) {
        qemu_log_mask(LOG_UNIMP,
                      "SMMUv3 S1 stalling fault model not allowed yet\n");
        goto bad_ste;
    }
    return 0;

bad_ste:
    event->type = SMMU_EVT_C_BAD_STE;
    return -EINVAL;
}

/**
 * smmu_find_ste - Return the stream table entry associated
 * with @sid
 *
 * @s: smmuv3 handle
 * @sid: stream ID
 * @ste: returned stream table entry
 * @event: handle to an event info
 *
 * Supports linear and 2-level stream tables
 * Return 0 on success, -EINVAL otherwise
 */
static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste,
                         SMMUEventInfo *event)
{
    dma_addr_t addr;
    int ret;

    trace_smmuv3_find_ste(sid, s->features, s->sid_split);
    /* Check SID range */
    if (sid >= (1 << SMMU_IDR1_SIDSIZE)) {
        event->type = SMMU_EVT_C_BAD_STREAMID;
        return -EINVAL;
    }
    if (s->features & SMMU_FEATURE_2LVL_STE) {
        int l1_ste_offset, l2_ste_offset, max_l2_ste, span;
        dma_addr_t strtab_base, l1ptr, l2ptr;
        STEDesc l1std;

        strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK;
        l1_ste_offset = sid >> s->sid_split;
        l2_ste_offset = sid & ((1 << s->sid_split) - 1);
        l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std));
        /* TODO: guarantee 64-bit single-copy atomicity */
        ret = dma_memory_read(&address_space_memory, l1ptr,
                              (uint8_t *)&l1std, sizeof(l1std));
        if (ret != MEMTX_OK) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Could not read L1PTR at 0x%"PRIx64"\n", l1ptr);
            event->type = SMMU_EVT_F_STE_FETCH;
            event->u.f_ste_fetch.addr = l1ptr;
            return -EINVAL;
        }

        span = L1STD_SPAN(&l1std);

        if (!span) {
            /* l2ptr is not valid */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "invalid sid=%d (L1STD span=0)\n", sid);
            event->type = SMMU_EVT_C_BAD_STREAMID;
            return -EINVAL;
        }
        max_l2_ste = (1 << span) - 1;
        l2ptr = l1std_l2ptr(&l1std);
        trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset,
                                   l2ptr, l2_ste_offset, max_l2_ste);
        if (l2_ste_offset > max_l2_ste) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "l2_ste_offset=%d > max_l2_ste=%d\n",
                          l2_ste_offset, max_l2_ste);
            event->type = SMMU_EVT_C_BAD_STE;
            return -EINVAL;
        }
        addr = l2ptr + l2_ste_offset * sizeof(*ste);
    } else {
        addr = s->strtab_base + sid * sizeof(*ste);
    }

    if (smmu_get_ste(s, addr, ste, event)) {
        return -EINVAL;
    }

    return 0;
}

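/*
 * Decode a context descriptor: sanity check the fields against what the
 * model supports (AArch64, no stall, no HTTU), then fill the per-TTB
 * translation table info (granule size, TSZ, table base) in @cfg.
 * Returns 0 on success, -EINVAL with a C_BAD_CD event otherwise.
 */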
static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event)
{
    int ret = -EINVAL;
    int i;

    if (!CD_VALID(cd) || !CD_AARCH64(cd)) {
        goto bad_cd;
    }
    if (!CD_A(cd)) {
        goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */
    }
    if (CD_S(cd)) {
        goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */
    }
    if (CD_HA(cd) || CD_HD(cd)) {
        goto bad_cd; /* HTTU = 0 */
    }

    /* we support only those at the moment */
    cfg->aa64 = true;
    cfg->stage = 1;

    cfg->oas = oas2bits(CD_IPS(cd));
    cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas);
    cfg->tbi = CD_TBI(cd);
    cfg->asid = CD_ASID(cd);

    trace_smmuv3_decode_cd(cfg->oas);

    /* decode data dependent on TT */
    for (i = 0; i <= 1; i++) {
        int tg, tsz;
        SMMUTransTableInfo *tt = &cfg->tt[i];

        cfg->tt[i].disabled = CD_EPD(cd, i);
        if (cfg->tt[i].disabled) {
            continue;
        }

        tsz = CD_TSZ(cd, i);
        if (tsz < 16 || tsz > 39) {
            goto bad_cd;
        }

        tg = CD_TG(cd, i);
        tt->granule_sz = tg2granule(tg, i);
        if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) {
            goto bad_cd;
        }

        tt->tsz = tsz;
        tt->ttb = CD_TTB(cd, i);
        if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) {
            goto bad_cd;
        }
        trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz);
    }

    event->record_trans_faults = CD_R(cd);

    return 0;

bad_cd:
    event->type = SMMU_EVT_C_BAD_CD;
    return ret;
}

/**
 * smmuv3_decode_config - Prepare the translation configuration
 * for the @mr iommu region
 * @mr: iommu memory region the translation config must be prepared for
 * @cfg: output translation configuration which is populated through
 *       the different configuration decoding steps
 * @event: must be zero'ed by the caller
 *
 * Return < 0 in case of config decoding error (@event is filled
 * accordingly), 0 otherwise.
 */
static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg,
                                SMMUEventInfo *event)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    uint32_t sid = smmu_get_sid(sdev);
    SMMUv3State *s = sdev->smmu;
    int ret;
    STE ste;
    CD cd;

    ret = smmu_find_ste(s, sid, &ste, event);
    if (ret) {
        return ret;
    }

    ret = decode_ste(s, cfg, &ste, event);
    if (ret) {
        return ret;
    }

    if (cfg->aborted || cfg->bypassed) {
        return 0;
    }

    ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event);
    if (ret) {
        return ret;
    }

    return decode_cd(cfg, &cd, event);
}

/**
 * smmuv3_get_config - Look up a cached copy of the configuration data for
 * @sdev; on a cache miss, decode the configuration structures from guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding under the form of an SMMUTransCfg struct. The hash table is indexed
 * by the SMMUDevice handle.
 */
static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;
    SMMUTransCfg *cfg;

    cfg = g_hash_table_lookup(bc->configs, sdev);
    if (cfg) {
        sdev->cfg_cache_hits++;
        trace_smmuv3_config_cache_hit(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
    } else {
        sdev->cfg_cache_misses++;
        trace_smmuv3_config_cache_miss(smmu_get_sid(sdev),
                            sdev->cfg_cache_hits, sdev->cfg_cache_misses,
                            100 * sdev->cfg_cache_hits /
                            (sdev->cfg_cache_hits + sdev->cfg_cache_misses));
        cfg = g_new0(SMMUTransCfg, 1);

        if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) {
            g_hash_table_insert(bc->configs, sdev, cfg);
        } else {
            g_free(cfg);
            cfg = NULL;
        }
    }
    return cfg;
}

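/* Remove the cached configuration of @sdev from the config cache */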
static void smmuv3_flush_config(SMMUDevice *sdev)
{
    SMMUv3State *s = sdev->smmu;
    SMMUState *bc = &s->smmu_state;

    trace_smmuv3_config_cache_inv(smmu_get_sid(sdev));
    g_hash_table_remove(bc->configs, sdev);
}

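/*
 * smmuv3_translate - IOMMU region translate callback
 *
 * Look up the STE/CD configuration for the device, handle the disabled,
 * bypass and abort cases, then query the IOTLB and fall back to a page
 * table walk on a miss. Errors are reported through the event queue.
 */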
static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                      IOMMUAccessFlags flag, int iommu_idx)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUv3State *s = sdev->smmu;
    uint32_t sid = smmu_get_sid(sdev);
    SMMUEventInfo event = {.type = SMMU_EVT_NONE, .sid = sid};
    SMMUPTWEventInfo ptw_info = {};
    SMMUTranslationStatus status;
    SMMUState *bs = ARM_SMMU(s);
    uint64_t page_mask, aligned_addr;
    IOMMUTLBEntry *cached_entry = NULL;
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg = NULL;
    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };
    SMMUIOTLBKey key, *new_key;

    qemu_mutex_lock(&s->mutex);

    if (!smmu_enabled(s)) {
        status = SMMU_TRANS_DISABLE;
        goto epilogue;
    }

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    if (cfg->aborted) {
        status = SMMU_TRANS_ABORT;
        goto epilogue;
    }

    if (cfg->bypassed) {
        status = SMMU_TRANS_BYPASS;
        goto epilogue;
    }

    tt = select_tt(cfg, addr);
    if (!tt) {
        if (event.record_trans_faults) {
            event.type = SMMU_EVT_F_TRANSLATION;
            event.u.f_translation.addr = addr;
            event.u.f_translation.rnw = flag & 0x1;
        }
        status = SMMU_TRANS_ERROR;
        goto epilogue;
    }

    page_mask = (1ULL << (tt->granule_sz)) - 1;
    aligned_addr = addr & ~page_mask;

    key.asid = cfg->asid;
    key.iova = aligned_addr;

    cached_entry = g_hash_table_lookup(bs->iotlb, &key);
    if (cached_entry) {
        cfg->iotlb_hits++;
        trace_smmu_iotlb_cache_hit(cfg->asid, aligned_addr,
                                   cfg->iotlb_hits, cfg->iotlb_misses,
                                   100 * cfg->iotlb_hits /
                                   (cfg->iotlb_hits + cfg->iotlb_misses));
        if ((flag & IOMMU_WO) && !(cached_entry->perm & IOMMU_WO)) {
            status = SMMU_TRANS_ERROR;
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
        } else {
            status = SMMU_TRANS_SUCCESS;
        }
        goto epilogue;
    }

    cfg->iotlb_misses++;
    trace_smmu_iotlb_cache_miss(cfg->asid, addr & ~page_mask,
                                cfg->iotlb_hits, cfg->iotlb_misses,
                                100 * cfg->iotlb_hits /
                                (cfg->iotlb_hits + cfg->iotlb_misses));

    if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) {
        smmu_iotlb_inv_all(bs);
    }

    cached_entry = g_new0(IOMMUTLBEntry, 1);

    if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) {
        g_free(cached_entry);
        switch (ptw_info.type) {
        case SMMU_PTW_ERR_WALK_EABT:
            event.type = SMMU_EVT_F_WALK_EABT;
            event.u.f_walk_eabt.addr = addr;
            event.u.f_walk_eabt.rnw = flag & 0x1;
            event.u.f_walk_eabt.class = 0x1;
            event.u.f_walk_eabt.addr2 = ptw_info.addr;
            break;
        case SMMU_PTW_ERR_TRANSLATION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_TRANSLATION;
                event.u.f_translation.addr = addr;
                event.u.f_translation.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ADDR_SIZE:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ADDR_SIZE;
                event.u.f_addr_size.addr = addr;
                event.u.f_addr_size.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_ACCESS:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_ACCESS;
                event.u.f_access.addr = addr;
                event.u.f_access.rnw = flag & 0x1;
            }
            break;
        case SMMU_PTW_ERR_PERMISSION:
            if (event.record_trans_faults) {
                event.type = SMMU_EVT_F_PERMISSION;
                event.u.f_permission.addr = addr;
                event.u.f_permission.rnw = flag & 0x1;
            }
            break;
        default:
            g_assert_not_reached();
        }
        status = SMMU_TRANS_ERROR;
    } else {
        new_key = g_new0(SMMUIOTLBKey, 1);
        new_key->asid = cfg->asid;
        new_key->iova = aligned_addr;
        g_hash_table_insert(bs->iotlb, new_key, cached_entry);
        status = SMMU_TRANS_SUCCESS;
    }

epilogue:
    qemu_mutex_unlock(&s->mutex);
    switch (status) {
    case SMMU_TRANS_SUCCESS:
        entry.perm = flag;
        entry.translated_addr = cached_entry->translated_addr +
                                    (addr & page_mask);
        entry.addr_mask = cached_entry->addr_mask;
        trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr,
                                       entry.translated_addr, entry.perm);
        break;
    case SMMU_TRANS_DISABLE:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr,
                                       entry.perm);
        break;
    case SMMU_TRANS_BYPASS:
        entry.perm = flag;
        entry.addr_mask = ~TARGET_PAGE_MASK;
        trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr,
                                      entry.perm);
        break;
    case SMMU_TRANS_ABORT:
        /* no event is recorded on abort */
        trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr,
                                     entry.perm);
        break;
    case SMMU_TRANS_ERROR:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s translation failed for iova=0x%"PRIx64"(%s)\n",
                      mr->parent_obj.name, addr, smmu_event_string(event.type));
        smmuv3_record_event(s, &event);
        break;
    }

    return entry;
}

/**
 * smmuv3_notify_iova - call the notifier @n for a given
 * @asid and @iova tuple.
 *
 * @mr: IOMMU mr region handle
 * @n: notifier to be called
 * @asid: address space ID or negative value if we don't care
 * @iova: iova
 */
static void smmuv3_notify_iova(IOMMUMemoryRegion *mr,
                               IOMMUNotifier *n,
                               int asid,
                               dma_addr_t iova)
{
    SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu);
    SMMUEventInfo event = {};
    SMMUTransTableInfo *tt;
    SMMUTransCfg *cfg;
    IOMMUTLBEntry entry;

    cfg = smmuv3_get_config(sdev, &event);
    if (!cfg) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s error decoding the configuration for iommu mr=%s\n",
                      __func__, mr->parent_obj.name);
        return;
    }

    if (asid >= 0 && cfg->asid != asid) {
        return;
    }

    tt = select_tt(cfg, iova);
    if (!tt) {
        return;
    }

    entry.target_as = &address_space_memory;
    entry.iova = iova;
    entry.addr_mask = (1 << tt->granule_sz) - 1;
    entry.perm = IOMMU_NONE;

    memory_region_notify_one(n, &entry);
}

/* invalidate an asid/iova tuple in all mr's */
static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova)
{
    SMMUNotifierNode *node;

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        IOMMUMemoryRegion *mr = &node->sdev->iommu;
        IOMMUNotifier *n;

        trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova);

        IOMMU_NOTIFIER_FOREACH(n, mr) {
            smmuv3_notify_iova(mr, n, asid, iova);
        }
    }
}

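/*
 * Consume all pending commands from the command queue. Processing stops
 * on the first command error, which is reported in the CMDQ error field
 * and through the GERROR CMDQ_ERR interrupt.
 */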
static int smmuv3_cmdq_consume(SMMUv3State *s)
{
    SMMUState *bs = ARM_SMMU(s);
    SMMUCmdError cmd_error = SMMU_CERROR_NONE;
    SMMUQueue *q = &s->cmdq;
    SMMUCommandType type = 0;

    if (!smmuv3_cmdq_enabled(s)) {
        return 0;
    }
    /*
     * some commands depend on register values, typically CR0. In case those
     * register values change while handling the command, spec says it
     * is UNPREDICTABLE whether the command is interpreted under the new
     * or old value.
     */

    while (!smmuv3_q_empty(q)) {
        uint32_t pending = s->gerror ^ s->gerrorn;
        Cmd cmd;

        trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

        if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) {
            break;
        }

        if (queue_read(q, &cmd) != MEMTX_OK) {
            cmd_error = SMMU_CERROR_ABT;
            break;
        }

        type = CMD_TYPE(&cmd);

        trace_smmuv3_cmdq_opcode(smmu_cmd_string(type));

        qemu_mutex_lock(&s->mutex);
        switch (type) {
        case SMMU_CMD_SYNC:
            if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) {
                smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0);
            }
            break;
        case SMMU_CMD_PREFETCH_CONFIG:
        case SMMU_CMD_PREFETCH_ADDR:
            break;
        case SMMU_CMD_CFGI_STE:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_ste(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);

            break;
        }
        case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */
        {
            uint32_t start = CMD_SID(&cmd), end, i;
            uint8_t range = CMD_STE_RANGE(&cmd);

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            end = start + (1 << (range + 1)) - 1;
            trace_smmuv3_cmdq_cfgi_ste_range(start, end);

            for (i = start; i <= end; i++) {
                IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, i);
                SMMUDevice *sdev;

                if (!mr) {
                    continue;
                }
                sdev = container_of(mr, SMMUDevice, iommu);
                smmuv3_flush_config(sdev);
            }
            break;
        }
        case SMMU_CMD_CFGI_CD:
        case SMMU_CMD_CFGI_CD_ALL:
        {
            uint32_t sid = CMD_SID(&cmd);
            IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid);
            SMMUDevice *sdev;

            if (CMD_SSEC(&cmd)) {
                cmd_error = SMMU_CERROR_ILL;
                break;
            }

            if (!mr) {
                break;
            }

            trace_smmuv3_cmdq_cfgi_cd(sid);
            sdev = container_of(mr, SMMUDevice, iommu);
            smmuv3_flush_config(sdev);
            break;
        }
        case SMMU_CMD_TLBI_NH_ASID:
        {
            uint16_t asid = CMD_ASID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_asid(asid);
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_asid(bs, asid);
            break;
        }
        case SMMU_CMD_TLBI_NH_ALL:
        case SMMU_CMD_TLBI_NSNH_ALL:
            trace_smmuv3_cmdq_tlbi_nh();
            smmu_inv_notifiers_all(&s->smmu_state);
            smmu_iotlb_inv_all(bs);
            break;
        case SMMU_CMD_TLBI_NH_VAA:
        {
            dma_addr_t addr = CMD_ADDR(&cmd);
            uint16_t vmid = CMD_VMID(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_vaa(vmid, addr);
            smmuv3_inv_notifiers_iova(bs, -1, addr);
            smmu_iotlb_inv_all(bs);
            break;
        }
        case SMMU_CMD_TLBI_NH_VA:
        {
            uint16_t asid = CMD_ASID(&cmd);
            uint16_t vmid = CMD_VMID(&cmd);
            dma_addr_t addr = CMD_ADDR(&cmd);
            bool leaf = CMD_LEAF(&cmd);

            trace_smmuv3_cmdq_tlbi_nh_va(vmid, asid, addr, leaf);
            smmuv3_inv_notifiers_iova(bs, asid, addr);
            smmu_iotlb_inv_iova(bs, asid, addr);
            break;
        }
        case SMMU_CMD_TLBI_EL3_ALL:
        case SMMU_CMD_TLBI_EL3_VA:
        case SMMU_CMD_TLBI_EL2_ALL:
        case SMMU_CMD_TLBI_EL2_ASID:
        case SMMU_CMD_TLBI_EL2_VA:
        case SMMU_CMD_TLBI_EL2_VAA:
        case SMMU_CMD_TLBI_S12_VMALL:
        case SMMU_CMD_TLBI_S2_IPA:
        case SMMU_CMD_ATC_INV:
        case SMMU_CMD_PRI_RESP:
        case SMMU_CMD_RESUME:
        case SMMU_CMD_STALL_TERM:
            trace_smmuv3_unhandled_cmd(type);
            break;
        default:
            cmd_error = SMMU_CERROR_ILL;
            qemu_log_mask(LOG_GUEST_ERROR,
                          "Illegal command type: %d\n", CMD_TYPE(&cmd));
            break;
        }
        qemu_mutex_unlock(&s->mutex);
        if (cmd_error) {
            break;
        }
        /*
         * We only increment the cons index after the completion of
         * the command. We do that because the SYNC returns immediately
         * and does not check the completion of previous commands
         */
        queue_cons_incr(q);
    }

    if (cmd_error) {
        trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error);
        smmu_write_cmdq_err(s, cmd_error);
        smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK);
    }

    trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q),
                                  Q_PROD_WRAP(q), Q_CONS_WRAP(q));

    return 0;
}

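/*
 * MMIO write handler for 64-bit accesses to the register file. Accesses
 * to unhandled offsets are write-ignored.
 */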
static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset,
                                uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        s->gerror_irq_cfg0 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        s->strtab_base = data;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        s->cmdq.base = data;
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        s->eventq.base = data;
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0:
        s->eventq_irq_cfg0 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset,
                               uint64_t data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_CR0:
        s->cr[0] = data;
        s->cr0ack = data & ~SMMU_CR0_RESERVED;
        /* in case the command queue has been enabled */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CR1:
        s->cr[1] = data;
        return MEMTX_OK;
    case A_CR2:
        s->cr[2] = data;
        return MEMTX_OK;
    case A_IRQ_CTRL:
        s->irq_ctrl = data;
        return MEMTX_OK;
    case A_GERRORN:
        smmuv3_write_gerrorn(s, data);
        /*
         * By acknowledging the CMDQ_ERR, SW signals that commands can
         * be processed again
         */
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        s->gerror_irq_cfg1 = data;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        s->gerror_irq_cfg2 = data;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        s->strtab_base = deposit64(s->strtab_base, 0, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4:
        s->strtab_base = deposit64(s->strtab_base, 32, 32, data);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        s->strtab_base_cfg = data;
        if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) {
            s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT);
            s->features |= SMMU_FEATURE_2LVL_STE;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data);
        s->cmdq.log2size = extract64(s->cmdq.base, 0, 5);
        if (s->cmdq.log2size > SMMU_CMDQS) {
            s->cmdq.log2size = SMMU_CMDQS;
        }
        return MEMTX_OK;
    case A_CMDQ_BASE + 4: /* 64b */
        s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        s->cmdq.prod = data;
        smmuv3_cmdq_consume(s);
        return MEMTX_OK;
    case A_CMDQ_CONS:
        s->cmdq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        s->eventq.base = deposit64(s->eventq.base, 0, 32, data);
        s->eventq.log2size = extract64(s->eventq.base, 0, 5);
        if (s->eventq.log2size > SMMU_EVENTQS) {
            s->eventq.log2size = SMMU_EVENTQS;
        }
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4:
        s->eventq.base = deposit64(s->eventq.base, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        s->eventq.prod = data;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        s->eventq.cons = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0: /* 64b */
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG0 + 4:
        s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data);
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG1:
        s->eventq_irq_cfg1 = data;
        return MEMTX_OK;
    case A_EVENTQ_IRQ_CFG2:
        s->eventq_irq_cfg2 = data;
        return MEMTX_OK;
    default:
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 32-bit access to 0x%"PRIx64" (WI)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

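/*
 * MMIO write dispatcher: page 0 and page 1 are treated as exact aliases,
 * and the access is routed to the 32-bit or 64-bit handler based on size.
 */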
static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data,
                                   unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_writell(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_writel(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_write_mmio(offset, data, size, r);
    return r;
}

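/*
 * MMIO read handlers for 64-bit and 32-bit register reads. Unhandled
 * offsets read as zero.
 */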
static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_GERROR_IRQ_CFG0:
        *data = s->gerror_irq_cfg0;
        return MEMTX_OK;
    case A_STRTAB_BASE:
        *data = s->strtab_base;
        return MEMTX_OK;
    case A_CMDQ_BASE:
        *data = s->cmdq.base;
        return MEMTX_OK;
    case A_EVENTQ_BASE:
        *data = s->eventq.base;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s Unexpected 64-bit access to 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case A_IDREGS ... A_IDREGS + 0x1f:
        *data = smmuv3_idreg(offset - A_IDREGS);
        return MEMTX_OK;
    case A_IDR0 ... A_IDR5:
        *data = s->idr[(offset - A_IDR0) / 4];
        return MEMTX_OK;
    case A_IIDR:
        *data = s->iidr;
        return MEMTX_OK;
    case A_CR0:
        *data = s->cr[0];
        return MEMTX_OK;
    case A_CR0ACK:
        *data = s->cr0ack;
        return MEMTX_OK;
    case A_CR1:
        *data = s->cr[1];
        return MEMTX_OK;
    case A_CR2:
        *data = s->cr[2];
        return MEMTX_OK;
    case A_STATUSR:
        *data = s->statusr;
        return MEMTX_OK;
    case A_IRQ_CTRL:
    case A_IRQ_CTRL_ACK:
        *data = s->irq_ctrl;
        return MEMTX_OK;
    case A_GERROR:
        *data = s->gerror;
        return MEMTX_OK;
    case A_GERRORN:
        *data = s->gerrorn;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0: /* 64b */
        *data = extract64(s->gerror_irq_cfg0, 0, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG0 + 4:
        *data = extract64(s->gerror_irq_cfg0, 32, 32);
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG1:
        *data = s->gerror_irq_cfg1;
        return MEMTX_OK;
    case A_GERROR_IRQ_CFG2:
        *data = s->gerror_irq_cfg2;
        return MEMTX_OK;
    case A_STRTAB_BASE: /* 64b */
        *data = extract64(s->strtab_base, 0, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE + 4: /* 64b */
        *data = extract64(s->strtab_base, 32, 32);
        return MEMTX_OK;
    case A_STRTAB_BASE_CFG:
        *data = s->strtab_base_cfg;
        return MEMTX_OK;
    case A_CMDQ_BASE: /* 64b */
        *data = extract64(s->cmdq.base, 0, 32);
        return MEMTX_OK;
    case A_CMDQ_BASE + 4:
        *data = extract64(s->cmdq.base, 32, 32);
        return MEMTX_OK;
    case A_CMDQ_PROD:
        *data = s->cmdq.prod;
        return MEMTX_OK;
    case A_CMDQ_CONS:
        *data = s->cmdq.cons;
        return MEMTX_OK;
    case A_EVENTQ_BASE: /* 64b */
        *data = extract64(s->eventq.base, 0, 32);
        return MEMTX_OK;
    case A_EVENTQ_BASE + 4: /* 64b */
        *data = extract64(s->eventq.base, 32, 32);
        return MEMTX_OK;
    case A_EVENTQ_PROD:
        *data = s->eventq.prod;
        return MEMTX_OK;
    case A_EVENTQ_CONS:
        *data = s->eventq.cons;
        return MEMTX_OK;
    default:
        *data = 0;
        qemu_log_mask(LOG_UNIMP,
                      "%s unhandled 32-bit access at 0x%"PRIx64" (RAZ)\n",
                      __func__, offset);
        return MEMTX_OK;
    }
}

static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data,
                                  unsigned size, MemTxAttrs attrs)
{
    SMMUState *sys = opaque;
    SMMUv3State *s = ARM_SMMUV3(sys);
    MemTxResult r;

    /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */
    offset &= ~0x10000;

    switch (size) {
    case 8:
        r = smmu_readll(s, offset, data, attrs);
        break;
    case 4:
        r = smmu_readl(s, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    trace_smmuv3_read_mmio(offset, *data, size, r);
    return r;
}

static const MemoryRegionOps smmu_mem_ops = {
    .read_with_attrs = smmu_read_mmio,
    .write_with_attrs = smmu_write_mmio,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->irq); i++) {
        sysbus_init_irq(dev, &s->irq[i]);
    }
}

static void smmu_reset(DeviceState *dev)
{
    SMMUv3State *s = ARM_SMMUV3(dev);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);

    c->parent_reset(dev);

    smmuv3_init_regs(s);
}

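/*
 * Realize: chain up to the base SMMU realize, then set up the register
 * MMIO region (2 x 64KB pages), the IOMMU memory region type used for
 * translation and the interrupt lines.
 */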
static void smmu_realize(DeviceState *d, Error **errp)
{
    SMMUState *sys = ARM_SMMU(d);
    SMMUv3State *s = ARM_SMMUV3(sys);
    SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s);
    SysBusDevice *dev = SYS_BUS_DEVICE(d);
    Error *local_err = NULL;

    c->parent_realize(d, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    qemu_mutex_init(&s->mutex);

    memory_region_init_io(&sys->iomem, OBJECT(s),
                          &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000);

    sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION;

    sysbus_init_mmio(dev, &sys->iomem);

    smmu_init_irq(s, dev);
}

static const VMStateDescription vmstate_smmuv3_queue = {
    .name = "smmuv3_queue",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(base, SMMUQueue),
        VMSTATE_UINT32(prod, SMMUQueue),
        VMSTATE_UINT32(cons, SMMUQueue),
        VMSTATE_UINT8(log2size, SMMUQueue),
        VMSTATE_END_OF_LIST(),
    },
};

static const VMStateDescription vmstate_smmuv3 = {
    .name = "smmuv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(features, SMMUv3State),
        VMSTATE_UINT8(sid_size, SMMUv3State),
        VMSTATE_UINT8(sid_split, SMMUv3State),

        VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3),
        VMSTATE_UINT32(cr0ack, SMMUv3State),
        VMSTATE_UINT32(statusr, SMMUv3State),
        VMSTATE_UINT32(irq_ctrl, SMMUv3State),
        VMSTATE_UINT32(gerror, SMMUv3State),
        VMSTATE_UINT32(gerrorn, SMMUv3State),
        VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State),
        VMSTATE_UINT64(strtab_base, SMMUv3State),
        VMSTATE_UINT32(strtab_base_cfg, SMMUv3State),
        VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State),
        VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State),

        VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),
        VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue),

        VMSTATE_END_OF_LIST(),
    },
};

static void smmuv3_instance_init(Object *obj)
{
    /* Nothing much to do here as of now */
}

static void smmuv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SMMUv3Class *c = ARM_SMMUV3_CLASS(klass);

    dc->vmsd = &vmstate_smmuv3;
    device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset);
    c->parent_realize = dc->realize;
    dc->realize = smmu_realize;
}

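/*
 * Notifier flag change callback: MAP notifiers are not supported (a
 * warning is printed), so only UNMAP notifiers are tracked by adding the
 * device to, or removing it from, the notifiers list.
 */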
static void smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                       IOMMUNotifierFlag old,
                                       IOMMUNotifierFlag new)
{
    SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu);
    SMMUv3State *s3 = sdev->smmu;
    SMMUState *s = &(s3->smmu_state);
    SMMUNotifierNode *node = NULL;
    SMMUNotifierNode *next_node = NULL;

    if (new & IOMMU_NOTIFIER_MAP) {
        int bus_num = pci_bus_num(sdev->bus);
        PCIDevice *pcidev = pci_find_device(sdev->bus, bus_num, sdev->devfn);

        warn_report("SMMUv3 does not support notification on MAP: "
                    "device %s will not function properly", pcidev->name);
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_smmuv3_notify_flag_add(iommu->parent_obj.name);
        node = g_malloc0(sizeof(*node));
        node->sdev = sdev;
        QLIST_INSERT_HEAD(&s->notifiers_list, node, next);
        return;
    }

    /* update notifier node with new flags */
    QLIST_FOREACH_SAFE(node, &s->notifiers_list, next, next_node) {
        if (node->sdev == sdev) {
            if (new == IOMMU_NOTIFIER_NONE) {
                trace_smmuv3_notify_flag_del(iommu->parent_obj.name);
                QLIST_REMOVE(node, next);
                g_free(node);
            }
            return;
        }
    }
}

static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = smmuv3_translate;
    imrc->notify_flag_changed = smmuv3_notify_flag_changed;
}

static const TypeInfo smmuv3_type_info = {
    .name          = TYPE_ARM_SMMUV3,
    .parent        = TYPE_ARM_SMMU,
    .instance_size = sizeof(SMMUv3State),
    .instance_init = smmuv3_instance_init,
    .class_size    = sizeof(SMMUv3Class),
    .class_init    = smmuv3_class_init,
};

static const TypeInfo smmuv3_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION,
    .class_init = smmuv3_iommu_memory_region_class_init,
};

static void smmuv3_register_types(void)
{
    type_register(&smmuv3_type_info);
    type_register(&smmuv3_iommu_memory_region_info);
}

type_init(smmuv3_register_types)