qemu/hw/misc/tz-mpc.c
/*
 * ARM AHB5 TrustZone Memory Protection Controller emulation
 *
 * Copyright (c) 2018 Linaro Limited
 * Written by Peter Maydell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 or
 * (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "qapi/error.h"
#include "trace.h"
#include "hw/sysbus.h"
#include "hw/registerfields.h"
#include "hw/misc/tz-mpc.h"

/* Our IOMMU has two IOMMU indexes, one for secure transactions and one for
 * non-secure transactions.
 */
enum {
    IOMMU_IDX_S,
    IOMMU_IDX_NS,
    IOMMU_NUM_INDEXES,
};

/* Config registers */
REG32(CTRL, 0x00)
    FIELD(CTRL, SEC_RESP, 4, 1)
    FIELD(CTRL, AUTOINC, 8, 1)
    FIELD(CTRL, LOCKDOWN, 31, 1)
REG32(BLK_MAX, 0x10)
REG32(BLK_CFG, 0x14)
REG32(BLK_IDX, 0x18)
REG32(BLK_LUT, 0x1c)
REG32(INT_STAT, 0x20)
    FIELD(INT_STAT, IRQ, 0, 1)
REG32(INT_CLEAR, 0x24)
    FIELD(INT_CLEAR, IRQ, 0, 1)
REG32(INT_EN, 0x28)
    FIELD(INT_EN, IRQ, 0, 1)
REG32(INT_INFO1, 0x2c)
REG32(INT_INFO2, 0x30)
    FIELD(INT_INFO2, HMASTER, 0, 16)
    FIELD(INT_INFO2, HNONSEC, 16, 1)
    FIELD(INT_INFO2, CFG_NS, 17, 1)
REG32(INT_SET, 0x34)
    FIELD(INT_SET, IRQ, 0, 1)
REG32(PIDR4, 0xfd0)
REG32(PIDR5, 0xfd4)
REG32(PIDR6, 0xfd8)
REG32(PIDR7, 0xfdc)
REG32(PIDR0, 0xfe0)
REG32(PIDR1, 0xfe4)
REG32(PIDR2, 0xfe8)
REG32(PIDR3, 0xfec)
REG32(CIDR0, 0xff0)
REG32(CIDR1, 0xff4)
REG32(CIDR2, 0xff8)
REG32(CIDR3, 0xffc)

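/* Values returned for reads of the ID registers, in register-offset order:
 * PIDR4..PIDR7, PIDR0..PIDR3, CIDR0..CIDR3 (see the A_PIDR4..A_CIDR3 cases
 * in tz_mpc_reg_read()).
 */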
static const uint8_t tz_mpc_idregs[] = {
    0x04, 0x00, 0x00, 0x00,
    0x60, 0xb8, 0x1b, 0x00,
    0x0d, 0xf0, 0x05, 0xb1,
};

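/* Update the outbound IRQ line: it is asserted only while an interrupt is
 * both pending (INT_STAT) and enabled (INT_EN).
 */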
static void tz_mpc_irq_update(TZMPC *s)
{
    qemu_set_irq(s->irq, s->int_stat && s->int_en);
}

static void tz_mpc_iommu_notify(TZMPC *s, uint32_t lutidx,
                                uint32_t oldlut, uint32_t newlut)
{
    /* Called when the LUT word at lutidx has changed from oldlut to newlut;
     * must call the IOMMU notifiers for the changed blocks.
     */
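    /* Each LUT word covers 32 blocks, so word lutidx describes the 32
     * consecutive blocks starting at downstream address
     * lutidx * blocksize * 32.
     */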
    IOMMUTLBEntry entry = {
        .addr_mask = s->blocksize - 1,
    };
    hwaddr addr = lutidx * s->blocksize * 32;
    int i;

    for (i = 0; i < 32; i++, addr += s->blocksize) {
        bool block_is_ns;

        if (!((oldlut ^ newlut) & (1 << i))) {
            continue;
        }
        /* This changes the mappings for both the S and the NS space,
         * so we need to do four notifies: an UNMAP then a MAP for each.
         */
        block_is_ns = newlut & (1 << i);

        trace_tz_mpc_iommu_notify(addr);
        entry.iova = addr;
        entry.translated_addr = addr;

        entry.perm = IOMMU_NONE;
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, entry);
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, entry);

        entry.perm = IOMMU_RW;
        if (block_is_ns) {
            entry.target_as = &s->blocked_io_as;
        } else {
            entry.target_as = &s->downstream_as;
        }
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_S, entry);
        if (block_is_ns) {
            entry.target_as = &s->downstream_as;
        } else {
            entry.target_as = &s->blocked_io_as;
        }
        memory_region_notify_iommu(&s->upstream, IOMMU_IDX_NS, entry);
    }
}

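/* BLK_IDX auto-increments only when CTRL.AUTOINC is set and the access is a
 * full 32-bit word; the index wraps back to zero when it reaches blk_max.
 */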
static void tz_mpc_autoinc_idx(TZMPC *s, unsigned access_size)
{
    /* Auto-increment BLK_IDX if necessary */
    if (access_size == 4 && (s->ctrl & R_CTRL_AUTOINC_MASK)) {
        s->blk_idx++;
        s->blk_idx %= s->blk_max;
    }
}

static MemTxResult tz_mpc_reg_read(void *opaque, hwaddr addr,
                                   uint64_t *pdata,
                                   unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);
    uint64_t r;
    uint32_t offset = addr & ~0x3;

    if (!attrs.secure && offset < A_PIDR4) {
        /* NS accesses can only see the ID registers */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: NS access to offset 0x%x\n",
                      offset);
        r = 0;
        goto read_out;
    }

    switch (offset) {
    case A_CTRL:
        r = s->ctrl;
        break;
    case A_BLK_MAX:
        r = s->blk_max - 1;
        break;
    case A_BLK_CFG:
        /* We are never in "init in progress state", so this just indicates
         * the block size. s->blocksize == (1 << BLK_CFG + 5), so
         * BLK_CFG == ctz32(s->blocksize) - 5
         */
        r = ctz32(s->blocksize) - 5;
        break;
    case A_BLK_IDX:
        r = s->blk_idx;
        break;
    case A_BLK_LUT:
        r = s->blk_lut[s->blk_idx];
        tz_mpc_autoinc_idx(s, size);
        break;
    case A_INT_STAT:
        r = s->int_stat;
        break;
    case A_INT_EN:
        r = s->int_en;
        break;
    case A_INT_INFO1:
        r = s->int_info1;
        break;
    case A_INT_INFO2:
        r = s->int_info2;
        break;
    case A_PIDR4:
    case A_PIDR5:
    case A_PIDR6:
    case A_PIDR7:
    case A_PIDR0:
    case A_PIDR1:
    case A_PIDR2:
    case A_PIDR3:
    case A_CIDR0:
    case A_CIDR1:
    case A_CIDR2:
    case A_CIDR3:
        r = tz_mpc_idregs[(offset - A_PIDR4) / 4];
        break;
    case A_INT_CLEAR:
    case A_INT_SET:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: write-only offset 0x%x\n",
                      offset);
        r = 0;
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register read: bad offset 0x%x\n", offset);
        r = 0;
        break;
    }

    if (size != 4) {
        /* None of our registers are read-sensitive (except BLK_LUT,
         * which can special case the "size not 4" case), so just
         * pull the right bytes out of the word read result.
         */
        r = extract32(r, (addr & 3) * 8, size * 8);
    }

read_out:
    trace_tz_mpc_reg_read(addr, r, size);
    *pdata = r;
    return MEMTX_OK;
}

static MemTxResult tz_mpc_reg_write(void *opaque, hwaddr addr,
                                    uint64_t value,
                                    unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);
    uint32_t offset = addr & ~0x3;

    trace_tz_mpc_reg_write(addr, value, size);

    if (!attrs.secure && offset < A_PIDR4) {
        /* NS accesses can only see the ID registers */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: NS access to offset 0x%x\n",
                      offset);
        return MEMTX_OK;
    }

    if (size != 4) {
        /* Expand the byte or halfword write to a full word size.
         * In most cases we can do this with zeroes; the exceptions
         * are CTRL, BLK_IDX and BLK_LUT.
         */
        uint32_t oldval;

        switch (offset) {
        case A_CTRL:
            oldval = s->ctrl;
            break;
        case A_BLK_IDX:
            oldval = s->blk_idx;
            break;
        case A_BLK_LUT:
            oldval = s->blk_lut[s->blk_idx];
            break;
        default:
            oldval = 0;
            break;
        }
        value = deposit32(oldval, (addr & 3) * 8, size * 8, value);
    }

    if ((s->ctrl & R_CTRL_LOCKDOWN_MASK) &&
        (offset == A_CTRL || offset == A_BLK_LUT || offset == A_INT_EN)) {
        /* Lockdown mode makes these three registers read-only, and
         * the only way out of it is to reset the device.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "TZ MPC register write to offset 0x%x "
                      "while MPC is in lockdown mode\n", offset);
        return MEMTX_OK;
    }

    switch (offset) {
    case A_CTRL:
        /* We don't implement the 'data gating' feature so all other bits
         * are reserved and we make them RAZ/WI.
         */
        s->ctrl = value & (R_CTRL_SEC_RESP_MASK |
                           R_CTRL_AUTOINC_MASK |
                           R_CTRL_LOCKDOWN_MASK);
        break;
    case A_BLK_IDX:
        s->blk_idx = value % s->blk_max;
        break;
    case A_BLK_LUT:
        tz_mpc_iommu_notify(s, s->blk_idx, s->blk_lut[s->blk_idx], value);
        s->blk_lut[s->blk_idx] = value;
        tz_mpc_autoinc_idx(s, size);
        break;
    case A_INT_CLEAR:
        if (value & R_INT_CLEAR_IRQ_MASK) {
            s->int_stat = 0;
            tz_mpc_irq_update(s);
        }
        break;
    case A_INT_EN:
        s->int_en = value & R_INT_EN_IRQ_MASK;
        tz_mpc_irq_update(s);
        break;
    case A_INT_SET:
        if (value & R_INT_SET_IRQ_MASK) {
            s->int_stat = R_INT_STAT_IRQ_MASK;
            tz_mpc_irq_update(s);
        }
        break;
    case A_PIDR4:
    case A_PIDR5:
    case A_PIDR6:
    case A_PIDR7:
    case A_PIDR0:
    case A_PIDR1:
    case A_PIDR2:
    case A_PIDR3:
    case A_CIDR0:
    case A_CIDR1:
    case A_CIDR2:
    case A_CIDR3:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: read-only offset 0x%x\n", offset);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "TZ MPC register write: bad offset 0x%x\n", offset);
        break;
    }

    return MEMTX_OK;
}

static const MemoryRegionOps tz_mpc_reg_ops = {
    .read_with_attrs = tz_mpc_reg_read,
    .write_with_attrs = tz_mpc_reg_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 4,
    .impl.min_access_size = 1,
    .impl.max_access_size = 4,
};

static inline bool tz_mpc_cfg_ns(TZMPC *s, hwaddr addr)
{
    /* Return the cfg_ns bit from the LUT for the specified address */
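    /* Worked example: with a 0x1000-byte block size, address 0x42000 is
     * block number 0x42, so its cfg_ns bit is bit 2 of s->blk_lut[2].
     */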
    hwaddr blknum = addr / s->blocksize;
    hwaddr blkword = blknum / 32;
    uint32_t blkbit = 1U << (blknum % 32);

    /* This would imply the address was larger than the size we
     * defined this memory region to be, so it can't happen.
     */
    assert(blkword < s->blk_max);
    return s->blk_lut[blkword] & blkbit;
}

static MemTxResult tz_mpc_handle_block(TZMPC *s, hwaddr addr, MemTxAttrs attrs)
{
    /* Handle a blocked transaction: raise IRQ, capture info, etc */
    if (!s->int_stat) {
        /* First blocked transfer: capture information into INT_INFO1 and
         * INT_INFO2. Subsequent transfers are still blocked but don't
         * capture information until the guest clears the interrupt.
         */

        s->int_info1 = addr;
        s->int_info2 = 0;
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HMASTER,
                                  attrs.requester_id & 0xffff);
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, HNONSEC,
                                  ~attrs.secure);
        s->int_info2 = FIELD_DP32(s->int_info2, INT_INFO2, CFG_NS,
                                  tz_mpc_cfg_ns(s, addr));
        s->int_stat |= R_INT_STAT_IRQ_MASK;
        tz_mpc_irq_update(s);
    }

    /* Generate bus error if desired; otherwise RAZ/WI */
    return (s->ctrl & R_CTRL_SEC_RESP_MASK) ? MEMTX_ERROR : MEMTX_OK;
}

/* Accesses only reach these read and write functions if the MPC is
 * blocking them; non-blocked accesses go directly to the downstream
 * memory region without passing through this code.
 */
static MemTxResult tz_mpc_mem_blocked_read(void *opaque, hwaddr addr,
                                           uint64_t *pdata,
                                           unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);

    trace_tz_mpc_mem_blocked_read(addr, size, attrs.secure);

    *pdata = 0;
    return tz_mpc_handle_block(s, addr, attrs);
}

static MemTxResult tz_mpc_mem_blocked_write(void *opaque, hwaddr addr,
                                            uint64_t value,
                                            unsigned size, MemTxAttrs attrs)
{
    TZMPC *s = TZ_MPC(opaque);

    trace_tz_mpc_mem_blocked_write(addr, value, size, attrs.secure);

    return tz_mpc_handle_block(s, addr, attrs);
}

static const MemoryRegionOps tz_mpc_mem_blocked_ops = {
    .read_with_attrs = tz_mpc_mem_blocked_read,
    .write_with_attrs = tz_mpc_mem_blocked_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid.min_access_size = 1,
    .valid.max_access_size = 8,
    .impl.min_access_size = 1,
    .impl.max_access_size = 8,
};

static IOMMUTLBEntry tz_mpc_translate(IOMMUMemoryRegion *iommu,
                                      hwaddr addr, IOMMUAccessFlags flags,
                                      int iommu_idx)
{
    TZMPC *s = TZ_MPC(container_of(iommu, TZMPC, upstream));
    bool ok;

    IOMMUTLBEntry ret = {
        .iova = addr & ~(s->blocksize - 1),
        .translated_addr = addr & ~(s->blocksize - 1),
        .addr_mask = s->blocksize - 1,
        .perm = IOMMU_RW,
    };

    /* Look at the per-block configuration for this address, and
     * return a TLB entry directing the transaction at either
     * downstream_as or blocked_io_as, as appropriate.
     * If the LUT cfg_ns bit is 1, only non-secure transactions
     * may pass. If the bit is 0, only secure transactions may pass.
     */
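    /* For example, a secure transaction (IOMMU_IDX_S) to a block whose
     * cfg_ns bit is 1 is directed at blocked_io_as and ends up in
     * tz_mpc_handle_block().
     */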
    ok = tz_mpc_cfg_ns(s, addr) == (iommu_idx == IOMMU_IDX_NS);

    trace_tz_mpc_translate(addr, flags,
                           iommu_idx == IOMMU_IDX_S ? "S" : "NS",
                           ok ? "pass" : "block");

    ret.target_as = ok ? &s->downstream_as : &s->blocked_io_as;
    return ret;
}

static int tz_mpc_attrs_to_index(IOMMUMemoryRegion *iommu, MemTxAttrs attrs)
{
    /* We treat unspecified attributes like secure. Transactions with
     * unspecified attributes come from places like
     * rom_reset() for initial image load, and we want
     * those to pass through the from-reset "everything is secure" config.
     * All the real during-emulation transactions from the CPU will
     * specify attributes.
     */
    return (attrs.unspecified || attrs.secure) ? IOMMU_IDX_S : IOMMU_IDX_NS;
}

static int tz_mpc_num_indexes(IOMMUMemoryRegion *iommu)
{
    return IOMMU_NUM_INDEXES;
}

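/* Reset leaves every LUT bit at 0, i.e. all blocks accessible only to
 * secure transactions, with CTRL reset to 0x100 (AUTOINC set, SEC_RESP
 * and LOCKDOWN clear).
 */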
static void tz_mpc_reset(DeviceState *dev)
{
    TZMPC *s = TZ_MPC(dev);

    s->ctrl = 0x00000100;
    s->blk_idx = 0;
    s->int_stat = 0;
    s->int_en = 1;
    s->int_info1 = 0;
    s->int_info2 = 0;

    memset(s->blk_lut, 0, s->blk_max * sizeof(uint32_t));
}

static void tz_mpc_init(Object *obj)
{
    DeviceState *dev = DEVICE(obj);
    TZMPC *s = TZ_MPC(obj);

    qdev_init_gpio_out_named(dev, &s->irq, "irq", 1);
}

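/* A sketch of typical board wiring (illustrative only: the object names
 * and addresses below are hypothetical, not taken from a particular board):
 *
 *   object_property_set_link(OBJECT(mpc), OBJECT(downstream_mr),
 *                            "downstream", &error_fatal);
 *   qdev_init_nofail(DEVICE(mpc));
 *   sysbus_mmio_map(SYS_BUS_DEVICE(mpc), 0, cfg_base);
 *   memory_region_add_subregion(system_memory, upstream_base,
 *       sysbus_mmio_get_region(SYS_BUS_DEVICE(mpc), 1));
 *
 * MMIO region 0 is the 4KB register frame; region 1 is the upstream IOMMU
 * memory region created in realize below.
 */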
static void tz_mpc_realize(DeviceState *dev, Error **errp)
{
    Object *obj = OBJECT(dev);
    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
    TZMPC *s = TZ_MPC(dev);
    uint64_t size;

    /* We can't create the upstream end of the port until realize,
     * as we don't know the size of the MR used as the downstream until then.
     * We insist on having a downstream, to avoid complicating the code
     * with handling the "don't know how big this is" case. It's easy
     * enough for the user to create an unimplemented_device as downstream
     * if they have nothing else to plug into this.
     */
    if (!s->downstream) {
        error_setg(errp, "MPC 'downstream' link not set");
        return;
    }

    size = memory_region_size(s->downstream);

    memory_region_init_iommu(&s->upstream, sizeof(s->upstream),
                             TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
                             obj, "tz-mpc-upstream", size);

    /* In real hardware the block size is configurable. In QEMU we could
     * make it configurable but will need it to be at least as big as the
     * target page size so we can execute out of the resulting MRs. Guest
     * software is supposed to check the block size using the BLK_CFG
     * register, so make it fixed at the page size.
     */
    s->blocksize = memory_region_iommu_get_min_page_size(&s->upstream);
    if (size % s->blocksize != 0) {
        error_setg(errp,
                   "MPC 'downstream' size %" PRId64
                   " is not a multiple of %" HWADDR_PRIx " bytes",
                   size, s->blocksize);
        object_unref(OBJECT(&s->upstream));
        return;
    }

    /* BLK_MAX is the max value of BLK_IDX, which indexes an array of 32-bit
     * words, each bit of which indicates one block.
     */
    s->blk_max = DIV_ROUND_UP(size / s->blocksize, 32);

    memory_region_init_io(&s->regmr, obj, &tz_mpc_reg_ops,
                          s, "tz-mpc-regs", 0x1000);
    sysbus_init_mmio(sbd, &s->regmr);

    sysbus_init_mmio(sbd, MEMORY_REGION(&s->upstream));

    /* This memory region is not exposed to users of this device as a
     * sysbus MMIO region, but is instead used internally as something
     * that our IOMMU translate function might direct accesses to.
     */
    memory_region_init_io(&s->blocked_io, obj, &tz_mpc_mem_blocked_ops,
                          s, "tz-mpc-blocked-io", size);

    address_space_init(&s->downstream_as, s->downstream,
                       "tz-mpc-downstream");
    address_space_init(&s->blocked_io_as, &s->blocked_io,
                       "tz-mpc-blocked-io");

    s->blk_lut = g_new0(uint32_t, s->blk_max);
}

static int tz_mpc_post_load(void *opaque, int version_id)
{
    TZMPC *s = TZ_MPC(opaque);

    /* Check the incoming data doesn't point blk_idx off the end of blk_lut. */
    if (s->blk_idx >= s->blk_max) {
        return -1;
    }
    return 0;
}

static const VMStateDescription tz_mpc_vmstate = {
    .name = "tz-mpc",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = tz_mpc_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(ctrl, TZMPC),
        VMSTATE_UINT32(blk_idx, TZMPC),
        VMSTATE_UINT32(int_stat, TZMPC),
        VMSTATE_UINT32(int_en, TZMPC),
        VMSTATE_UINT32(int_info1, TZMPC),
        VMSTATE_UINT32(int_info2, TZMPC),
        VMSTATE_VARRAY_UINT32(blk_lut, TZMPC, blk_max,
                              0, vmstate_info_uint32, uint32_t),
        VMSTATE_END_OF_LIST()
    }
};

static Property tz_mpc_properties[] = {
    DEFINE_PROP_LINK("downstream", TZMPC, downstream,
                     TYPE_MEMORY_REGION, MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};

static void tz_mpc_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = tz_mpc_realize;
    dc->vmsd = &tz_mpc_vmstate;
    dc->reset = tz_mpc_reset;
    dc->props = tz_mpc_properties;
}

static const TypeInfo tz_mpc_info = {
    .name = TYPE_TZ_MPC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(TZMPC),
    .instance_init = tz_mpc_init,
    .class_init = tz_mpc_class_init,
};

static void tz_mpc_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = tz_mpc_translate;
    imrc->attrs_to_index = tz_mpc_attrs_to_index;
    imrc->num_indexes = tz_mpc_num_indexes;
}

static const TypeInfo tz_mpc_iommu_memory_region_info = {
    .name = TYPE_TZ_MPC_IOMMU_MEMORY_REGION,
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .class_init = tz_mpc_iommu_memory_region_class_init,
};

static void tz_mpc_register_types(void)
{
    type_register_static(&tz_mpc_info);
    type_register_static(&tz_mpc_iommu_memory_region_info);
}

type_init(tz_mpc_register_types);